diff --git a/.gitignore b/.gitignore index ce3af2b1c..97c6d25d1 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ zig-cache/ build/ build-release/ build-windows/ +build-llvm-5/ /.cproject /.project /.settings/ diff --git a/CMakeLists.txt b/CMakeLists.txt index 45b7e45ef..2fd8840cd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -64,76 +64,6 @@ set(ZIG_SOURCES "${CMAKE_SOURCE_DIR}/src/zig_llvm.cpp" ) -set(C_HEADERS - "${CMAKE_SOURCE_DIR}/c_headers/Intrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/__stddef_max_align_t.h" - "${CMAKE_SOURCE_DIR}/c_headers/__wmmintrin_aes.h" - "${CMAKE_SOURCE_DIR}/c_headers/__wmmintrin_pclmul.h" - "${CMAKE_SOURCE_DIR}/c_headers/adxintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/ammintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/arm_acle.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx2intrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx512bwintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx512cdintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx512dqintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx512erintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx512fintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx512vlbwintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx512vldqintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avx512vlintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/avxintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/bmi2intrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/bmiintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/cpuid.h" - "${CMAKE_SOURCE_DIR}/c_headers/cuda_builtin_vars.h" - "${CMAKE_SOURCE_DIR}/c_headers/emmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/f16cintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/float.h" - "${CMAKE_SOURCE_DIR}/c_headers/fma4intrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/fmaintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/fxsrintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/htmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/htmxlintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/ia32intrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/immintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/inttypes.h" - "${CMAKE_SOURCE_DIR}/c_headers/iso646.h" - "${CMAKE_SOURCE_DIR}/c_headers/limits.h" - "${CMAKE_SOURCE_DIR}/c_headers/lzcntintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/mm3dnow.h" - "${CMAKE_SOURCE_DIR}/c_headers/mm_malloc.h" - "${CMAKE_SOURCE_DIR}/c_headers/mmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/nmmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/pmmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/popcntintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/prfchwintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/rdseedintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/rtmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/s390intrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/shaintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/smmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/stdalign.h" - "${CMAKE_SOURCE_DIR}/c_headers/stdarg.h" - "${CMAKE_SOURCE_DIR}/c_headers/stdatomic.h" - "${CMAKE_SOURCE_DIR}/c_headers/stdbool.h" - "${CMAKE_SOURCE_DIR}/c_headers/stddef.h" - "${CMAKE_SOURCE_DIR}/c_headers/stdint.h" - "${CMAKE_SOURCE_DIR}/c_headers/stdnoreturn.h" - "${CMAKE_SOURCE_DIR}/c_headers/tbmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/tgmath.h" - "${CMAKE_SOURCE_DIR}/c_headers/tmmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/unwind.h" - "${CMAKE_SOURCE_DIR}/c_headers/vadefs.h" - "${CMAKE_SOURCE_DIR}/c_headers/varargs.h" - "${CMAKE_SOURCE_DIR}/c_headers/vecintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/wmmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/x86intrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/xmmintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/xopintrin.h" - "${CMAKE_SOURCE_DIR}/c_headers/xtestintrin.h" -) - - set(ZIG_HOST_LINK_VERSION) 
if (APPLE) set(LD_V_OUTPUT) @@ -198,7 +128,99 @@ if(MINGW) endif() install(TARGETS zig DESTINATION bin) -install(FILES ${C_HEADERS} DESTINATION ${C_HEADERS_DEST}) +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__clang_cuda_builtin_vars.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__clang_cuda_cmath.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__clang_cuda_complex_builtins.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__clang_cuda_intrinsics.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__clang_cuda_math_forward_declares.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__clang_cuda_runtime_wrapper.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__stddef_max_align_t.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__wmmintrin_aes.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/__wmmintrin_pclmul.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/adxintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/altivec.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/ammintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/arm_acle.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/arm_neon.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/armintr.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx2intrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512bwintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512cdintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512dqintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512erintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512fintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512ifmaintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512ifmavlintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512pfintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512vbmiintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512vbmivlintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512vlbwintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512vlcdintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512vldqintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avx512vlintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/avxintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/bmi2intrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/bmiintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/clflushoptintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES 
"${CMAKE_SOURCE_DIR}/c_headers/cpuid.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/cuda_wrappers/algorithm" DESTINATION "${C_HEADERS_DEST}/cuda_wrappers") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/cuda_wrappers/complex" DESTINATION "${C_HEADERS_DEST}/cuda_wrappers") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/cuda_wrappers/new" DESTINATION "${C_HEADERS_DEST}/cuda_wrappers") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/emmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/f16cintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/float.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/fma4intrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/fmaintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/fxsrintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/htmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/htmxlintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/ia32intrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/immintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/intrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/inttypes.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/iso646.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/limits.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/lzcntintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/mm3dnow.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/mm_malloc.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/mmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/module.modulemap" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/msa.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/mwaitxintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/nmmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/opencl-c.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/pkuintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/pmmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/popcntintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/prfchwintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/rdseedintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/rtmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/s390intrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/shaintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/smmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/stdalign.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/stdarg.h" DESTINATION "${C_HEADERS_DEST}") 
+install(FILES "${CMAKE_SOURCE_DIR}/c_headers/stdatomic.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/stdbool.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/stddef.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/stdint.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/stdnoreturn.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/tbmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/tgmath.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/tmmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/unwind.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/vadefs.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/varargs.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/vecintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/wmmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/x86intrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/xmmintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/xopintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/xsavecintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/xsaveintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/xsaveoptintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/xsavesintrin.h" DESTINATION "${C_HEADERS_DEST}") +install(FILES "${CMAKE_SOURCE_DIR}/c_headers/xtestintrin.h" DESTINATION "${C_HEADERS_DEST}") install(FILES "${CMAKE_SOURCE_DIR}/std/array_list.zig" DESTINATION "${ZIG_STD_DEST}") install(FILES "${CMAKE_SOURCE_DIR}/std/base64.zig" DESTINATION "${ZIG_STD_DEST}") diff --git a/c_headers/cuda_builtin_vars.h b/c_headers/__clang_cuda_builtin_vars.h similarity index 69% rename from c_headers/cuda_builtin_vars.h rename to c_headers/__clang_cuda_builtin_vars.h index 901356b3d..6f5eb9c78 100644 --- a/c_headers/cuda_builtin_vars.h +++ b/c_headers/__clang_cuda_builtin_vars.h @@ -24,16 +24,20 @@ #ifndef __CUDA_BUILTIN_VARS_H #define __CUDA_BUILTIN_VARS_H +// Forward declares from vector_types.h. +struct uint3; +struct dim3; + // The file implements built-in CUDA variables using __declspec(property). // https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx // All read accesses of built-in variable fields get converted into calls to a -// getter function which in turn would call appropriate builtin to fetch the +// getter function which in turn calls the appropriate builtin to fetch the // value. 
// // Example: // int x = threadIdx.x; // IR output: -// %0 = call i32 @llvm.ptx.read.tid.x() #3 +// %0 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() #3 // PTX output: // mov.u32 %r2, %tid.x; @@ -60,33 +64,45 @@ __attribute__((device)) TypeName *operator&() const __DELETE struct __cuda_builtin_threadIdx_t { - __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_tid_x()); - __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_tid_y()); - __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_tid_z()); + __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_tid_x()); + __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_tid_y()); + __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z()); + // threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a + // uint3). This function is defined after we pull in vector_types.h. + __attribute__((device)) operator uint3() const; private: __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t); }; struct __cuda_builtin_blockIdx_t { - __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ctaid_x()); - __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ctaid_y()); - __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ctaid_z()); + __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ctaid_x()); + __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ctaid_y()); + __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z()); + // blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a + // uint3). This function is defined after we pull in vector_types.h. + __attribute__((device)) operator uint3() const; private: __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t); }; struct __cuda_builtin_blockDim_t { - __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ntid_x()); - __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ntid_y()); - __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ntid_z()); + __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ntid_x()); + __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ntid_y()); + __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ntid_z()); + // blockDim should be convertible to dim3 (in fact in nvcc, it *is* a + // dim3). This function is defined after we pull in vector_types.h. + __attribute__((device)) operator dim3() const; private: __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t); }; struct __cuda_builtin_gridDim_t { - __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_nctaid_x()); - __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_nctaid_y()); - __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_nctaid_z()); + __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_nctaid_x()); + __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_nctaid_y()); + __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_nctaid_z()); + // gridDim should be convertible to dim3 (in fact in nvcc, it *is* a + // dim3). This function is defined after we pull in vector_types.h. 
+ __attribute__((device)) operator dim3() const; private: __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t); }; diff --git a/c_headers/__clang_cuda_cmath.h b/c_headers/__clang_cuda_cmath.h new file mode 100644 index 000000000..9bef82611 --- /dev/null +++ b/c_headers/__clang_cuda_cmath.h @@ -0,0 +1,487 @@ +/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __CLANG_CUDA_CMATH_H__ +#define __CLANG_CUDA_CMATH_H__ +#ifndef __CUDA__ +#error "This file is for CUDA compilation only." +#endif + +#include <limits> + +// CUDA lets us use various std math functions on the device side. This file +// works in concert with __clang_cuda_math_forward_declares.h to make this work. +// +// Specifically, the forward-declares header declares __device__ overloads for +// these functions in the global namespace, then pulls them into namespace std +// with 'using' statements. Then this file implements those functions, after +// their implementations have been pulled in. +// +// It's important that we declare the functions in the global namespace and pull +// them into namespace std with using statements, as opposed to simply declaring +// these functions in namespace std, because our device functions need to +// overload the standard library functions, which may be declared in the global +// namespace or in std, depending on the degree of conformance of the stdlib +// implementation. Declaring in the global namespace and pulling into namespace +// std covers all of the known knowns.
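The pattern the comment above describes can be seen in miniature in the following stand-alone sketch. It is plain host C++ with a made-up namespace mylib standing in for std, so none of the names below come from the real headers: an overload added in the global namespace and then pulled into the library namespace with a using-declaration overloads whatever the library itself declared, regardless of which namespace the library put its own declaration in.

    // using_pattern_sketch.cpp -- illustrative only; "mylib" stands in for std.
    #include <cstdio>

    namespace mylib {
    // Pretend this is the standard library's own declaration.
    inline double myabs(double x) { return x < 0 ? -x : x; }
    }

    // Step 1: define the extra overload in the global namespace
    // (in the real headers this is a __device__ overload).
    inline float myabs(float x) { return x < 0 ? -x : x; }

    // Step 2: pull the global overload into the library namespace so both
    // overloads participate in overload resolution under mylib::.
    namespace mylib {
    using ::myabs;
    }

    int main() {
      // Picks the double overload, then the float one.
      std::printf("%f %f\n", mylib::myabs(-2.0), (double)mylib::myabs(-2.0f));
      return 0;
    }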
+ +#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline)) + +__DEVICE__ long long abs(long long __n) { return ::llabs(__n); } +__DEVICE__ long abs(long __n) { return ::labs(__n); } +__DEVICE__ float abs(float __x) { return ::fabsf(__x); } +__DEVICE__ double abs(double __x) { return ::fabs(__x); } +__DEVICE__ float acos(float __x) { return ::acosf(__x); } +__DEVICE__ float asin(float __x) { return ::asinf(__x); } +__DEVICE__ float atan(float __x) { return ::atanf(__x); } +__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); } +__DEVICE__ float ceil(float __x) { return ::ceilf(__x); } +__DEVICE__ float cos(float __x) { return ::cosf(__x); } +__DEVICE__ float cosh(float __x) { return ::coshf(__x); } +__DEVICE__ float exp(float __x) { return ::expf(__x); } +__DEVICE__ float fabs(float __x) { return ::fabsf(__x); } +__DEVICE__ float floor(float __x) { return ::floorf(__x); } +__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); } +__DEVICE__ int fpclassify(float __x) { + return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, + FP_ZERO, __x); +} +__DEVICE__ int fpclassify(double __x) { + return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, + FP_ZERO, __x); +} +__DEVICE__ float frexp(float __arg, int *__exp) { + return ::frexpf(__arg, __exp); +} + +// For inscrutable reasons, the CUDA headers define these functions for us on +// Windows. +#ifndef _MSC_VER +__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); } +__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); } +__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); } +// For inscrutable reasons, __finite(), the double-precision version of +// __finitef, does not exist when compiling for MacOS. __isfinited is available +// everywhere and is just as good. 
+__DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); } +__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); } +__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); } +#endif + +__DEVICE__ bool isgreater(float __x, float __y) { + return __builtin_isgreater(__x, __y); +} +__DEVICE__ bool isgreater(double __x, double __y) { + return __builtin_isgreater(__x, __y); +} +__DEVICE__ bool isgreaterequal(float __x, float __y) { + return __builtin_isgreaterequal(__x, __y); +} +__DEVICE__ bool isgreaterequal(double __x, double __y) { + return __builtin_isgreaterequal(__x, __y); +} +__DEVICE__ bool isless(float __x, float __y) { + return __builtin_isless(__x, __y); +} +__DEVICE__ bool isless(double __x, double __y) { + return __builtin_isless(__x, __y); +} +__DEVICE__ bool islessequal(float __x, float __y) { + return __builtin_islessequal(__x, __y); +} +__DEVICE__ bool islessequal(double __x, double __y) { + return __builtin_islessequal(__x, __y); +} +__DEVICE__ bool islessgreater(float __x, float __y) { + return __builtin_islessgreater(__x, __y); +} +__DEVICE__ bool islessgreater(double __x, double __y) { + return __builtin_islessgreater(__x, __y); +} +__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); } +__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); } +__DEVICE__ bool isunordered(float __x, float __y) { + return __builtin_isunordered(__x, __y); +} +__DEVICE__ bool isunordered(double __x, double __y) { + return __builtin_isunordered(__x, __y); +} +__DEVICE__ float ldexp(float __arg, int __exp) { + return ::ldexpf(__arg, __exp); +} +__DEVICE__ float log(float __x) { return ::logf(__x); } +__DEVICE__ float log10(float __x) { return ::log10f(__x); } +__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); } +__DEVICE__ float nexttoward(float __from, double __to) { + return __builtin_nexttowardf(__from, __to); +} +__DEVICE__ double nexttoward(double __from, double __to) { + return __builtin_nexttoward(__from, __to); +} +__DEVICE__ float nexttowardf(float __from, double __to) { + return __builtin_nexttowardf(__from, __to); +} +__DEVICE__ float pow(float __base, float __exp) { + return ::powf(__base, __exp); +} +__DEVICE__ float pow(float __base, int __iexp) { + return ::powif(__base, __iexp); +} +__DEVICE__ double pow(double __base, int __iexp) { + return ::powi(__base, __iexp); +} +__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); } +__DEVICE__ bool signbit(double __x) { return ::__signbitd(__x); } +__DEVICE__ float sin(float __x) { return ::sinf(__x); } +__DEVICE__ float sinh(float __x) { return ::sinhf(__x); } +__DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); } +__DEVICE__ float tan(float __x) { return ::tanf(__x); } +__DEVICE__ float tanh(float __x) { return ::tanhf(__x); } + +// Now we've defined everything we promised we'd define in +// __clang_cuda_math_forward_declares.h. We need to do two additional things to +// fix up our math functions. +// +// 1) Define __device__ overloads for e.g. sin(int). The CUDA headers define +// only sin(float) and sin(double), which means that e.g. sin(0) is +// ambiguous. +// +// 2) Pull the __device__ overloads of "foobarf" math functions into namespace +// std. These are defined in the CUDA headers in the global namespace, +// independent of everything else we've done here. + +// We can't use std::enable_if, because we want to be pre-C++11 compatible. 
But +// we go ahead and unconditionally define functions that are only available when +// compiling for C++11 to match the behavior of the CUDA headers. +template +struct __clang_cuda_enable_if {}; + +template struct __clang_cuda_enable_if { + typedef __T type; +}; + +// Defines an overload of __fn that accepts one integral argument, calls +// __fn((double)x), and returns __retty. +#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn) \ + template \ + __DEVICE__ \ + typename __clang_cuda_enable_if::is_integer, \ + __retty>::type \ + __fn(__T __x) { \ + return ::__fn((double)__x); \ + } + +// Defines an overload of __fn that accepts one two arithmetic arguments, calls +// __fn((double)x, (double)y), and returns a double. +// +// Note this is different from OVERLOAD_1, which generates an overload that +// accepts only *integral* arguments. +#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn) \ + template \ + __DEVICE__ typename __clang_cuda_enable_if< \ + std::numeric_limits<__T1>::is_specialized && \ + std::numeric_limits<__T2>::is_specialized, \ + __retty>::type \ + __fn(__T1 __x, __T2 __y) { \ + return __fn((double)__x, (double)__y); \ + } + +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acos) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acosh) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asin) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asinh) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atan) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, atan2); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atanh) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cbrt) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, ceil) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, copysign); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cos) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cosh) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erf) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erfc) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp2) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, expm1) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, fabs) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fdim); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, floor) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmax); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmin); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmod); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, fpclassify) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, hypot); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, ilogb) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isfinite) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreater); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreaterequal); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isinf); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isless); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessequal); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessgreater); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnan); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnormal) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isunordered); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, lgamma) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log10) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log1p) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log2) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, logb) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llrint) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llround) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lrint) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lround) 
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, nearbyint); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, nextafter); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, pow); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, remainder); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, rint); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, round); +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, signbit) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sinh) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sqrt) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tan) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tanh) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tgamma) +__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, trunc); + +#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_1 +#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_2 + +// Overloads for functions that don't match the patterns expected by +// __CUDA_CLANG_FN_INTEGER_OVERLOAD_{1,2}. +template +__DEVICE__ typename __clang_cuda_enable_if< + std::numeric_limits<__T1>::is_specialized && + std::numeric_limits<__T2>::is_specialized && + std::numeric_limits<__T3>::is_specialized, + double>::type +fma(__T1 __x, __T2 __y, __T3 __z) { + return std::fma((double)__x, (double)__y, (double)__z); +} + +template +__DEVICE__ typename __clang_cuda_enable_if::is_integer, + double>::type +frexp(__T __x, int *__exp) { + return std::frexp((double)__x, __exp); +} + +template +__DEVICE__ typename __clang_cuda_enable_if::is_integer, + double>::type +ldexp(__T __x, int __exp) { + return std::ldexp((double)__x, __exp); +} + +template +__DEVICE__ typename __clang_cuda_enable_if::is_integer, + double>::type +nexttoward(__T __from, double __to) { + return std::nexttoward((double)__from, __to); +} + +template +__DEVICE__ typename __clang_cuda_enable_if< + std::numeric_limits<__T1>::is_specialized && + std::numeric_limits<__T2>::is_specialized, + double>::type +remquo(__T1 __x, __T2 __y, int *__quo) { + return std::remquo((double)__x, (double)__y, __quo); +} + +template +__DEVICE__ typename __clang_cuda_enable_if::is_integer, + double>::type +scalbln(__T __x, long __exp) { + return std::scalbln((double)__x, __exp); +} + +template +__DEVICE__ typename __clang_cuda_enable_if::is_integer, + double>::type +scalbn(__T __x, int __exp) { + return std::scalbn((double)__x, __exp); +} + +// We need to define these overloads in exactly the namespace our standard +// library uses (including the right inline namespace), otherwise they won't be +// picked up by other functions in the standard library (e.g. functions in +// ). Thus the ugliness below. +#ifdef _LIBCPP_BEGIN_NAMESPACE_STD +_LIBCPP_BEGIN_NAMESPACE_STD +#else +namespace std { +#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION +_GLIBCXX_BEGIN_NAMESPACE_VERSION +#endif +#endif + +// Pull the new overloads we defined above into namespace std. 
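One note before the using-declarations that follow: the machinery above is essentially a hand-rolled, pre-C++11 std::enable_if keyed off std::numeric_limits traits, which the OVERLOAD macros use to generate catch-all overloads that promote their arguments to double. A stand-alone host-C++ sketch of the same idea (all names here are made up for illustration, none are from the header):

    // enable_if_sketch.cpp -- illustrative re-creation of the pattern only.
    #include <cstdio>
    #include <limits>

    template <bool B, class T = void> struct my_enable_if {};
    template <class T> struct my_enable_if<true, T> { typedef T type; };

    // Stand-in for the pre-existing double overload (e.g. ::sin(double)).
    double mysin(double x) { return x - x * x * x / 6.0; }

    // Analogue of what __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin) expands
    // to: accept any integral type, cast it to double, and forward.
    template <typename T>
    typename my_enable_if<std::numeric_limits<T>::is_integer, double>::type
    mysin(T x) {
      return mysin((double)x);
    }

    int main() {
      std::printf("%f\n", mysin(1));    // int: matches the template, forwards 1.0
      std::printf("%f\n", mysin(0.5));  // double: matches the plain overload
      return 0;
    }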
+using ::acos; +using ::acosh; +using ::asin; +using ::asinh; +using ::atan; +using ::atan2; +using ::atanh; +using ::cbrt; +using ::ceil; +using ::copysign; +using ::cos; +using ::cosh; +using ::erf; +using ::erfc; +using ::exp; +using ::exp2; +using ::expm1; +using ::fabs; +using ::fdim; +using ::floor; +using ::fma; +using ::fmax; +using ::fmin; +using ::fmod; +using ::fpclassify; +using ::frexp; +using ::hypot; +using ::ilogb; +using ::isfinite; +using ::isgreater; +using ::isgreaterequal; +using ::isless; +using ::islessequal; +using ::islessgreater; +using ::isnormal; +using ::isunordered; +using ::ldexp; +using ::lgamma; +using ::llrint; +using ::llround; +using ::log; +using ::log10; +using ::log1p; +using ::log2; +using ::logb; +using ::lrint; +using ::lround; +using ::nearbyint; +using ::nextafter; +using ::nexttoward; +using ::pow; +using ::remainder; +using ::remquo; +using ::rint; +using ::round; +using ::scalbln; +using ::scalbn; +using ::signbit; +using ::sin; +using ::sinh; +using ::sqrt; +using ::tan; +using ::tanh; +using ::tgamma; +using ::trunc; + +// Well this is fun: We need to pull these symbols in for libc++, but we can't +// pull them in with libstdc++, because its ::isinf and ::isnan are different +// than its std::isinf and std::isnan. +#ifndef __GLIBCXX__ +using ::isinf; +using ::isnan; +#endif + +// Finally, pull the "foobarf" functions that CUDA defines in its headers into +// namespace std. +using ::acosf; +using ::acoshf; +using ::asinf; +using ::asinhf; +using ::atan2f; +using ::atanf; +using ::atanhf; +using ::cbrtf; +using ::ceilf; +using ::copysignf; +using ::cosf; +using ::coshf; +using ::erfcf; +using ::erff; +using ::exp2f; +using ::expf; +using ::expm1f; +using ::fabsf; +using ::fdimf; +using ::floorf; +using ::fmaf; +using ::fmaxf; +using ::fminf; +using ::fmodf; +using ::frexpf; +using ::hypotf; +using ::ilogbf; +using ::ldexpf; +using ::lgammaf; +using ::llrintf; +using ::llroundf; +using ::log10f; +using ::log1pf; +using ::log2f; +using ::logbf; +using ::logf; +using ::lrintf; +using ::lroundf; +using ::modff; +using ::nearbyintf; +using ::nextafterf; +using ::nexttowardf; +using ::nexttowardf; +using ::powf; +using ::remainderf; +using ::remquof; +using ::rintf; +using ::roundf; +using ::scalblnf; +using ::scalbnf; +using ::sinf; +using ::sinhf; +using ::sqrtf; +using ::tanf; +using ::tanhf; +using ::tgammaf; +using ::truncf; + +#ifdef _LIBCPP_END_NAMESPACE_STD +_LIBCPP_END_NAMESPACE_STD +#else +#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION +_GLIBCXX_END_NAMESPACE_VERSION +#endif +} // namespace std +#endif + +#undef __DEVICE__ + +#endif diff --git a/c_headers/__clang_cuda_complex_builtins.h b/c_headers/__clang_cuda_complex_builtins.h new file mode 100644 index 000000000..beef7deff --- /dev/null +++ b/c_headers/__clang_cuda_complex_builtins.h @@ -0,0 +1,203 @@ +/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_CUDA_COMPLEX_BUILTINS +#define __CLANG_CUDA_COMPLEX_BUILTINS + +// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3. These are +// libgcc functions that clang assumes are available when compiling c99 complex +// operations. (These implementations come from libc++, and have been modified +// to work with CUDA.) + +extern "C" inline __device__ double _Complex __muldc3(double __a, double __b, + double __c, double __d) { + double __ac = __a * __c; + double __bd = __b * __d; + double __ad = __a * __d; + double __bc = __b * __c; + double _Complex z; + __real__(z) = __ac - __bd; + __imag__(z) = __ad + __bc; + if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) { + int __recalc = 0; + if (std::isinf(__a) || std::isinf(__b)) { + __a = std::copysign(std::isinf(__a) ? 1 : 0, __a); + __b = std::copysign(std::isinf(__b) ? 1 : 0, __b); + if (std::isnan(__c)) + __c = std::copysign(0, __c); + if (std::isnan(__d)) + __d = std::copysign(0, __d); + __recalc = 1; + } + if (std::isinf(__c) || std::isinf(__d)) { + __c = std::copysign(std::isinf(__c) ? 1 : 0, __c); + __d = std::copysign(std::isinf(__d) ? 1 : 0, __d); + if (std::isnan(__a)) + __a = std::copysign(0, __a); + if (std::isnan(__b)) + __b = std::copysign(0, __b); + __recalc = 1; + } + if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) || + std::isinf(__ad) || std::isinf(__bc))) { + if (std::isnan(__a)) + __a = std::copysign(0, __a); + if (std::isnan(__b)) + __b = std::copysign(0, __b); + if (std::isnan(__c)) + __c = std::copysign(0, __c); + if (std::isnan(__d)) + __d = std::copysign(0, __d); + __recalc = 1; + } + if (__recalc) { + // Can't use std::numeric_limits::infinity() -- that doesn't have + // a device overload (and isn't constexpr before C++11, naturally). + __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d); + __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c); + } + } + return z; +} + +extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b, + float __c, float __d) { + float __ac = __a * __c; + float __bd = __b * __d; + float __ad = __a * __d; + float __bc = __b * __c; + float _Complex z; + __real__(z) = __ac - __bd; + __imag__(z) = __ad + __bc; + if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) { + int __recalc = 0; + if (std::isinf(__a) || std::isinf(__b)) { + __a = std::copysign(std::isinf(__a) ? 1 : 0, __a); + __b = std::copysign(std::isinf(__b) ? 1 : 0, __b); + if (std::isnan(__c)) + __c = std::copysign(0, __c); + if (std::isnan(__d)) + __d = std::copysign(0, __d); + __recalc = 1; + } + if (std::isinf(__c) || std::isinf(__d)) { + __c = std::copysign(std::isinf(__c) ? 1 : 0, __c); + __d = std::copysign(std::isinf(__d) ? 
1 : 0, __d); + if (std::isnan(__a)) + __a = std::copysign(0, __a); + if (std::isnan(__b)) + __b = std::copysign(0, __b); + __recalc = 1; + } + if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) || + std::isinf(__ad) || std::isinf(__bc))) { + if (std::isnan(__a)) + __a = std::copysign(0, __a); + if (std::isnan(__b)) + __b = std::copysign(0, __b); + if (std::isnan(__c)) + __c = std::copysign(0, __c); + if (std::isnan(__d)) + __d = std::copysign(0, __d); + __recalc = 1; + } + if (__recalc) { + __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d); + __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c); + } + } + return z; +} + +extern "C" inline __device__ double _Complex __divdc3(double __a, double __b, + double __c, double __d) { + int __ilogbw = 0; + // Can't use std::max, because that's defined in , and we don't + // want to pull that in for every compile. The CUDA headers define + // ::max(float, float) and ::max(double, double), which is sufficient for us. + double __logbw = std::logb(max(std::abs(__c), std::abs(__d))); + if (std::isfinite(__logbw)) { + __ilogbw = (int)__logbw; + __c = std::scalbn(__c, -__ilogbw); + __d = std::scalbn(__d, -__ilogbw); + } + double __denom = __c * __c + __d * __d; + double _Complex z; + __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw); + __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw); + if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) { + if ((__denom == 0.0) && (!std::isnan(__a) || !std::isnan(__b))) { + __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a; + __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b; + } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) && + std::isfinite(__d)) { + __a = std::copysign(std::isinf(__a) ? 1.0 : 0.0, __a); + __b = std::copysign(std::isinf(__b) ? 1.0 : 0.0, __b); + __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d); + __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d); + } else if (std::isinf(__logbw) && __logbw > 0.0 && std::isfinite(__a) && + std::isfinite(__b)) { + __c = std::copysign(std::isinf(__c) ? 1.0 : 0.0, __c); + __d = std::copysign(std::isinf(__d) ? 1.0 : 0.0, __d); + __real__(z) = 0.0 * (__a * __c + __b * __d); + __imag__(z) = 0.0 * (__b * __c - __a * __d); + } + } + return z; +} + +extern "C" inline __device__ float _Complex __divsc3(float __a, float __b, + float __c, float __d) { + int __ilogbw = 0; + float __logbw = std::logb(max(std::abs(__c), std::abs(__d))); + if (std::isfinite(__logbw)) { + __ilogbw = (int)__logbw; + __c = std::scalbn(__c, -__ilogbw); + __d = std::scalbn(__d, -__ilogbw); + } + float __denom = __c * __c + __d * __d; + float _Complex z; + __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw); + __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw); + if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) { + if ((__denom == 0) && (!std::isnan(__a) || !std::isnan(__b))) { + __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a; + __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b; + } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) && + std::isfinite(__d)) { + __a = std::copysign(std::isinf(__a) ? 1 : 0, __a); + __b = std::copysign(std::isinf(__b) ? 
1 : 0, __b); + __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d); + __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d); + } else if (std::isinf(__logbw) && __logbw > 0 && std::isfinite(__a) && + std::isfinite(__b)) { + __c = std::copysign(std::isinf(__c) ? 1 : 0, __c); + __d = std::copysign(std::isinf(__d) ? 1 : 0, __d); + __real__(z) = 0 * (__a * __c + __b * __d); + __imag__(z) = 0 * (__b * __c - __a * __d); + } + } + return z; +} + +#endif // __CLANG_CUDA_COMPLEX_BUILTINS diff --git a/c_headers/__clang_cuda_intrinsics.h b/c_headers/__clang_cuda_intrinsics.h new file mode 100644 index 000000000..b43ce21d0 --- /dev/null +++ b/c_headers/__clang_cuda_intrinsics.h @@ -0,0 +1,322 @@ +/*===--- __clang_cuda_intrinsics.h - Device-side CUDA intrinsic wrappers ---=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __CLANG_CUDA_INTRINSICS_H__ +#define __CLANG_CUDA_INTRINSICS_H__ +#ifndef __CUDA__ +#error "This file is for CUDA compilation only." +#endif + +// sm_30 intrinsics: __shfl_{up,down,xor}. 
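Stepping back briefly to the complex-number helpers in __clang_cuda_complex_builtins.h above: the core of __divdc3/__divsc3 is a scaled division. The denominator is rescaled by a power of two taken from logb of its larger component so c*c + d*d cannot overflow or underflow, the scaling is undone afterwards with scalbn, and the inf/NaN branches then patch up the edge cases. A host-side sketch of just that core (hypothetical helper name, no CUDA needed, patch-up branches omitted):

    // complex_div_sketch.cpp -- illustrative only.
    #include <cmath>
    #include <cstdio>

    // Compute (a + b*i) / (c + d*i), mirroring the scaling in __divdc3.
    static void complex_div(double a, double b, double c, double d,
                            double *re, double *im) {
      int ilogbw = 0;
      double logbw = std::logb(std::fmax(std::fabs(c), std::fabs(d)));
      if (std::isfinite(logbw)) {
        ilogbw = (int)logbw;
        c = std::scalbn(c, -ilogbw);  // bring the denominator near 1.0
        d = std::scalbn(d, -ilogbw);
      }
      double denom = c * c + d * d;
      *re = std::scalbn((a * c + b * d) / denom, -ilogbw);
      *im = std::scalbn((b * c - a * d) / denom, -ilogbw);
    }

    int main() {
      double re, im;
      complex_div(1, 2, 3, 4, &re, &im);  // (1+2i)/(3+4i) = 0.44 + 0.08i
      std::printf("%g + %gi\n", re, im);
      return 0;
    }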
+ +#define __SM_30_INTRINSICS_H__ +#define __SM_30_INTRINSICS_HPP__ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +#pragma push_macro("__MAKE_SHUFFLES") +#define __MAKE_SHUFFLES(__FnName, __IntIntrinsic, __FloatIntrinsic, __Mask) \ + inline __device__ int __FnName(int __val, int __offset, \ + int __width = warpSize) { \ + return __IntIntrinsic(__val, __offset, \ + ((warpSize - __width) << 8) | (__Mask)); \ + } \ + inline __device__ float __FnName(float __val, int __offset, \ + int __width = warpSize) { \ + return __FloatIntrinsic(__val, __offset, \ + ((warpSize - __width) << 8) | (__Mask)); \ + } \ + inline __device__ unsigned int __FnName(unsigned int __val, int __offset, \ + int __width = warpSize) { \ + return static_cast( \ + ::__FnName(static_cast(__val), __offset, __width)); \ + } \ + inline __device__ long long __FnName(long long __val, int __offset, \ + int __width = warpSize) { \ + struct __Bits { \ + int __a, __b; \ + }; \ + _Static_assert(sizeof(__val) == sizeof(__Bits)); \ + _Static_assert(sizeof(__Bits) == 2 * sizeof(int)); \ + __Bits __tmp; \ + memcpy(&__val, &__tmp, sizeof(__val)); \ + __tmp.__a = ::__FnName(__tmp.__a, __offset, __width); \ + __tmp.__b = ::__FnName(__tmp.__b, __offset, __width); \ + long long __ret; \ + memcpy(&__ret, &__tmp, sizeof(__tmp)); \ + return __ret; \ + } \ + inline __device__ unsigned long long __FnName( \ + unsigned long long __val, int __offset, int __width = warpSize) { \ + return static_cast(::__FnName( \ + static_cast(__val), __offset, __width)); \ + } \ + inline __device__ double __FnName(double __val, int __offset, \ + int __width = warpSize) { \ + long long __tmp; \ + _Static_assert(sizeof(__tmp) == sizeof(__val)); \ + memcpy(&__tmp, &__val, sizeof(__val)); \ + __tmp = ::__FnName(__tmp, __offset, __width); \ + double __ret; \ + memcpy(&__ret, &__tmp, sizeof(__ret)); \ + return __ret; \ + } + +__MAKE_SHUFFLES(__shfl, __nvvm_shfl_idx_i32, __nvvm_shfl_idx_f32, 0x1f); +// We use 0 rather than 31 as our mask, because shfl.up applies to lanes >= +// maxLane. +__MAKE_SHUFFLES(__shfl_up, __nvvm_shfl_up_i32, __nvvm_shfl_up_f32, 0); +__MAKE_SHUFFLES(__shfl_down, __nvvm_shfl_down_i32, __nvvm_shfl_down_f32, 0x1f); +__MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f); + +#pragma pop_macro("__MAKE_SHUFFLES") + +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +// sm_32 intrinsics: __ldg and __funnelshift_{l,lc,r,rc}. + +// Prevent the vanilla sm_32 intrinsics header from being included. 
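Before the sm_32 wrappers below, a usage sketch for the __shfl_down overloads generated by __MAKE_SHUFFLES above. The kernel is hypothetical and assumes the classic pre-__shfl_*_sync model in which all 32 lanes of the warp are active and the grid exactly covers the input:

    // warp_reduce_sketch.cu -- illustrative only; compile as CUDA.
    __device__ float warp_reduce_sum(float val) {
      // Tree reduction over one warp: after the loop, lane 0 holds the sum
      // of all 32 lanes' inputs.
      for (int offset = 16; offset > 0; offset /= 2)
        val += __shfl_down(val, offset);  // read val from lane (laneid + offset)
      return val;
    }

    __global__ void sum_kernel(const float *in, float *out) {
      float v = warp_reduce_sum(in[blockIdx.x * blockDim.x + threadIdx.x]);
      if (threadIdx.x % 32 == 0)
        atomicAdd(out, v);  // one atomic per warp
    }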
+#define __SM_32_INTRINSICS_H__ +#define __SM_32_INTRINSICS_HPP__ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +inline __device__ char __ldg(const char *ptr) { return __nvvm_ldg_c(ptr); } +inline __device__ short __ldg(const short *ptr) { return __nvvm_ldg_s(ptr); } +inline __device__ int __ldg(const int *ptr) { return __nvvm_ldg_i(ptr); } +inline __device__ long __ldg(const long *ptr) { return __nvvm_ldg_l(ptr); } +inline __device__ long long __ldg(const long long *ptr) { + return __nvvm_ldg_ll(ptr); +} +inline __device__ unsigned char __ldg(const unsigned char *ptr) { + return __nvvm_ldg_uc(ptr); +} +inline __device__ unsigned short __ldg(const unsigned short *ptr) { + return __nvvm_ldg_us(ptr); +} +inline __device__ unsigned int __ldg(const unsigned int *ptr) { + return __nvvm_ldg_ui(ptr); +} +inline __device__ unsigned long __ldg(const unsigned long *ptr) { + return __nvvm_ldg_ul(ptr); +} +inline __device__ unsigned long long __ldg(const unsigned long long *ptr) { + return __nvvm_ldg_ull(ptr); +} +inline __device__ float __ldg(const float *ptr) { return __nvvm_ldg_f(ptr); } +inline __device__ double __ldg(const double *ptr) { return __nvvm_ldg_d(ptr); } + +inline __device__ char2 __ldg(const char2 *ptr) { + typedef char c2 __attribute__((ext_vector_type(2))); + // We can assume that ptr is aligned at least to char2's alignment, but the + // load will assume that ptr is aligned to char2's alignment. This is only + // safe if alignof(c2) <= alignof(char2). + c2 rv = __nvvm_ldg_c2(reinterpret_cast(ptr)); + char2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ char4 __ldg(const char4 *ptr) { + typedef char c4 __attribute__((ext_vector_type(4))); + c4 rv = __nvvm_ldg_c4(reinterpret_cast(ptr)); + char4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ short2 __ldg(const short2 *ptr) { + typedef short s2 __attribute__((ext_vector_type(2))); + s2 rv = __nvvm_ldg_s2(reinterpret_cast(ptr)); + short2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ short4 __ldg(const short4 *ptr) { + typedef short s4 __attribute__((ext_vector_type(4))); + s4 rv = __nvvm_ldg_s4(reinterpret_cast(ptr)); + short4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ int2 __ldg(const int2 *ptr) { + typedef int i2 __attribute__((ext_vector_type(2))); + i2 rv = __nvvm_ldg_i2(reinterpret_cast(ptr)); + int2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ int4 __ldg(const int4 *ptr) { + typedef int i4 __attribute__((ext_vector_type(4))); + i4 rv = __nvvm_ldg_i4(reinterpret_cast(ptr)); + int4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ longlong2 __ldg(const longlong2 *ptr) { + typedef long long ll2 __attribute__((ext_vector_type(2))); + ll2 rv = __nvvm_ldg_ll2(reinterpret_cast(ptr)); + longlong2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} + +inline __device__ uchar2 __ldg(const uchar2 *ptr) { + typedef unsigned char uc2 __attribute__((ext_vector_type(2))); + uc2 rv = __nvvm_ldg_uc2(reinterpret_cast(ptr)); + uchar2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ uchar4 __ldg(const uchar4 *ptr) { + typedef unsigned char uc4 __attribute__((ext_vector_type(4))); + uc4 rv = __nvvm_ldg_uc4(reinterpret_cast(ptr)); + uchar4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline 
__device__ ushort2 __ldg(const ushort2 *ptr) { + typedef unsigned short us2 __attribute__((ext_vector_type(2))); + us2 rv = __nvvm_ldg_us2(reinterpret_cast(ptr)); + ushort2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ ushort4 __ldg(const ushort4 *ptr) { + typedef unsigned short us4 __attribute__((ext_vector_type(4))); + us4 rv = __nvvm_ldg_us4(reinterpret_cast(ptr)); + ushort4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ uint2 __ldg(const uint2 *ptr) { + typedef unsigned int ui2 __attribute__((ext_vector_type(2))); + ui2 rv = __nvvm_ldg_ui2(reinterpret_cast(ptr)); + uint2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ uint4 __ldg(const uint4 *ptr) { + typedef unsigned int ui4 __attribute__((ext_vector_type(4))); + ui4 rv = __nvvm_ldg_ui4(reinterpret_cast(ptr)); + uint4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ ulonglong2 __ldg(const ulonglong2 *ptr) { + typedef unsigned long long ull2 __attribute__((ext_vector_type(2))); + ull2 rv = __nvvm_ldg_ull2(reinterpret_cast(ptr)); + ulonglong2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} + +inline __device__ float2 __ldg(const float2 *ptr) { + typedef float f2 __attribute__((ext_vector_type(2))); + f2 rv = __nvvm_ldg_f2(reinterpret_cast(ptr)); + float2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} +inline __device__ float4 __ldg(const float4 *ptr) { + typedef float f4 __attribute__((ext_vector_type(4))); + f4 rv = __nvvm_ldg_f4(reinterpret_cast(ptr)); + float4 ret; + ret.x = rv[0]; + ret.y = rv[1]; + ret.z = rv[2]; + ret.w = rv[3]; + return ret; +} +inline __device__ double2 __ldg(const double2 *ptr) { + typedef double d2 __attribute__((ext_vector_type(2))); + d2 rv = __nvvm_ldg_d2(reinterpret_cast(ptr)); + double2 ret; + ret.x = rv[0]; + ret.y = rv[1]; + return ret; +} + +// TODO: Implement these as intrinsics, so the backend can work its magic on +// these. Alternatively, we could implement these as plain C and try to get +// llvm to recognize the relevant patterns. 
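Before the funnel-shift helpers that follow, a usage sketch for the __ldg overloads above. __ldg routes a load through the read-only (texture) cache path, which is only safe for data that no thread writes during the kernel; the kernel below is made up for illustration:

    // ldg_sketch.cu -- illustrative only; compile as CUDA for sm_32 or newer.
    __global__ void saxpy_readonly(int n, float a,
                                   const float *__restrict__ x,
                                   float *__restrict__ y) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n)
        y[i] = a * __ldg(&x[i]) + y[i];  // x is never written here, so __ldg is safe
    }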
+inline __device__ unsigned __funnelshift_l(unsigned low32, unsigned high32, + unsigned shiftWidth) { + unsigned result; + asm("shf.l.wrap.b32 %0, %1, %2, %3;" + : "=r"(result) + : "r"(low32), "r"(high32), "r"(shiftWidth)); + return result; +} +inline __device__ unsigned __funnelshift_lc(unsigned low32, unsigned high32, + unsigned shiftWidth) { + unsigned result; + asm("shf.l.clamp.b32 %0, %1, %2, %3;" + : "=r"(result) + : "r"(low32), "r"(high32), "r"(shiftWidth)); + return result; +} +inline __device__ unsigned __funnelshift_r(unsigned low32, unsigned high32, + unsigned shiftWidth) { + unsigned result; + asm("shf.r.wrap.b32 %0, %1, %2, %3;" + : "=r"(result) + : "r"(low32), "r"(high32), "r"(shiftWidth)); + return result; +} +inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32, + unsigned shiftWidth) { + unsigned ret; + asm("shf.r.clamp.b32 %0, %1, %2, %3;" + : "=r"(ret) + : "r"(low32), "r"(high32), "r"(shiftWidth)); + return ret; +} + +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +#endif // defined(__CLANG_CUDA_INTRINSICS_H__) diff --git a/c_headers/__clang_cuda_math_forward_declares.h b/c_headers/__clang_cuda_math_forward_declares.h new file mode 100644 index 000000000..49c805151 --- /dev/null +++ b/c_headers/__clang_cuda_math_forward_declares.h @@ -0,0 +1,286 @@ +/*===- __clang_math_forward_declares.h - Prototypes of __device__ math fns --=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __CLANG__CUDA_MATH_FORWARD_DECLARES_H__ +#define __CLANG__CUDA_MATH_FORWARD_DECLARES_H__ +#ifndef __CUDA__ +#error "This file is for CUDA compilation only." +#endif + +// This file forward-declares of some math functions we (or the CUDA headers) +// will define later. We need to do this, and do it before cmath is included, +// because the standard library may have constexpr math functions. In the +// absence of a prior __device__ decl, those constexpr functions may become +// implicitly host+device. host+device functions can't be overloaded, so that +// would preclude the use of our own __device__ overloads for these functions. 
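The payoff of these forward declarations, together with __clang_cuda_cmath.h above, is that ordinary std:: math calls resolve to __device__ overloads inside device code. A usage sketch with a made-up kernel:

    // std_math_device_sketch.cu -- illustrative only; compile as CUDA with clang.
    #include <cmath>

    __global__ void rms_scale(float *v, int n, float scale) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n)
        v[i] = std::sqrt(std::fabs(v[i])) / scale;  // std:: names pick the __device__ overloads
    }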
+ +#pragma push_macro("__DEVICE__") +#define __DEVICE__ \ + static __inline__ __attribute__((always_inline)) __attribute__((device)) + +__DEVICE__ double abs(double); +__DEVICE__ float abs(float); +__DEVICE__ int abs(int); +__DEVICE__ long abs(long); +__DEVICE__ long long abs(long long); +__DEVICE__ double acos(double); +__DEVICE__ float acos(float); +__DEVICE__ double acosh(double); +__DEVICE__ float acosh(float); +__DEVICE__ double asin(double); +__DEVICE__ float asin(float); +__DEVICE__ double asinh(double); +__DEVICE__ float asinh(float); +__DEVICE__ double atan2(double, double); +__DEVICE__ float atan2(float, float); +__DEVICE__ double atan(double); +__DEVICE__ float atan(float); +__DEVICE__ double atanh(double); +__DEVICE__ float atanh(float); +__DEVICE__ double cbrt(double); +__DEVICE__ float cbrt(float); +__DEVICE__ double ceil(double); +__DEVICE__ float ceil(float); +__DEVICE__ double copysign(double, double); +__DEVICE__ float copysign(float, float); +__DEVICE__ double cos(double); +__DEVICE__ float cos(float); +__DEVICE__ double cosh(double); +__DEVICE__ float cosh(float); +__DEVICE__ double erfc(double); +__DEVICE__ float erfc(float); +__DEVICE__ double erf(double); +__DEVICE__ float erf(float); +__DEVICE__ double exp2(double); +__DEVICE__ float exp2(float); +__DEVICE__ double exp(double); +__DEVICE__ float exp(float); +__DEVICE__ double expm1(double); +__DEVICE__ float expm1(float); +__DEVICE__ double fabs(double); +__DEVICE__ float fabs(float); +__DEVICE__ double fdim(double, double); +__DEVICE__ float fdim(float, float); +__DEVICE__ double floor(double); +__DEVICE__ float floor(float); +__DEVICE__ double fma(double, double, double); +__DEVICE__ float fma(float, float, float); +__DEVICE__ double fmax(double, double); +__DEVICE__ float fmax(float, float); +__DEVICE__ double fmin(double, double); +__DEVICE__ float fmin(float, float); +__DEVICE__ double fmod(double, double); +__DEVICE__ float fmod(float, float); +__DEVICE__ int fpclassify(double); +__DEVICE__ int fpclassify(float); +__DEVICE__ double frexp(double, int *); +__DEVICE__ float frexp(float, int *); +__DEVICE__ double hypot(double, double); +__DEVICE__ float hypot(float, float); +__DEVICE__ int ilogb(double); +__DEVICE__ int ilogb(float); +__DEVICE__ bool isfinite(double); +__DEVICE__ bool isfinite(float); +__DEVICE__ bool isgreater(double, double); +__DEVICE__ bool isgreaterequal(double, double); +__DEVICE__ bool isgreaterequal(float, float); +__DEVICE__ bool isgreater(float, float); +__DEVICE__ bool isinf(double); +__DEVICE__ bool isinf(float); +__DEVICE__ bool isless(double, double); +__DEVICE__ bool islessequal(double, double); +__DEVICE__ bool islessequal(float, float); +__DEVICE__ bool isless(float, float); +__DEVICE__ bool islessgreater(double, double); +__DEVICE__ bool islessgreater(float, float); +__DEVICE__ bool isnan(double); +__DEVICE__ bool isnan(float); +__DEVICE__ bool isnormal(double); +__DEVICE__ bool isnormal(float); +__DEVICE__ bool isunordered(double, double); +__DEVICE__ bool isunordered(float, float); +__DEVICE__ long labs(long); +__DEVICE__ double ldexp(double, int); +__DEVICE__ float ldexp(float, int); +__DEVICE__ double lgamma(double); +__DEVICE__ float lgamma(float); +__DEVICE__ long long llabs(long long); +__DEVICE__ long long llrint(double); +__DEVICE__ long long llrint(float); +__DEVICE__ double log10(double); +__DEVICE__ float log10(float); +__DEVICE__ double log1p(double); +__DEVICE__ float log1p(float); +__DEVICE__ double log2(double); +__DEVICE__ float log2(float); +__DEVICE__ double 
logb(double); +__DEVICE__ float logb(float); +__DEVICE__ double log(double); +__DEVICE__ float log(float); +__DEVICE__ long lrint(double); +__DEVICE__ long lrint(float); +__DEVICE__ long lround(double); +__DEVICE__ long lround(float); +__DEVICE__ long long llround(float); // No llround(double). +__DEVICE__ double modf(double, double *); +__DEVICE__ float modf(float, float *); +__DEVICE__ double nan(const char *); +__DEVICE__ float nanf(const char *); +__DEVICE__ double nearbyint(double); +__DEVICE__ float nearbyint(float); +__DEVICE__ double nextafter(double, double); +__DEVICE__ float nextafter(float, float); +__DEVICE__ double nexttoward(double, double); +__DEVICE__ float nexttoward(float, double); +__DEVICE__ float nexttowardf(float, double); +__DEVICE__ double pow(double, double); +__DEVICE__ double pow(double, int); +__DEVICE__ float pow(float, float); +__DEVICE__ float pow(float, int); +__DEVICE__ double remainder(double, double); +__DEVICE__ float remainder(float, float); +__DEVICE__ double remquo(double, double, int *); +__DEVICE__ float remquo(float, float, int *); +__DEVICE__ double rint(double); +__DEVICE__ float rint(float); +__DEVICE__ double round(double); +__DEVICE__ float round(float); +__DEVICE__ double scalbln(double, long); +__DEVICE__ float scalbln(float, long); +__DEVICE__ double scalbn(double, int); +__DEVICE__ float scalbn(float, int); +__DEVICE__ bool signbit(double); +__DEVICE__ bool signbit(float); +__DEVICE__ double sin(double); +__DEVICE__ float sin(float); +__DEVICE__ double sinh(double); +__DEVICE__ float sinh(float); +__DEVICE__ double sqrt(double); +__DEVICE__ float sqrt(float); +__DEVICE__ double tan(double); +__DEVICE__ float tan(float); +__DEVICE__ double tanh(double); +__DEVICE__ float tanh(float); +__DEVICE__ double tgamma(double); +__DEVICE__ float tgamma(float); +__DEVICE__ double trunc(double); +__DEVICE__ float trunc(float); + +// We need to define these overloads in exactly the namespace our standard +// library uses (including the right inline namespace), otherwise they won't be +// picked up by other functions in the standard library (e.g. functions in +// ). Thus the ugliness below. 
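/* Editor's note, before the namespace plumbing below: each __DEVICE__ line
 * above is only shorthand. After preprocessing, for example,
 *
 *   __DEVICE__ double sqrt(double);
 *
 * becomes an ordinary device-only declaration:
 *
 *   static __inline__ __attribute__((always_inline)) __attribute__((device))
 *   double sqrt(double);
 */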
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD +_LIBCPP_BEGIN_NAMESPACE_STD +#else +namespace std { +#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION +_GLIBCXX_BEGIN_NAMESPACE_VERSION +#endif +#endif + +using ::abs; +using ::acos; +using ::acosh; +using ::asin; +using ::asinh; +using ::atan; +using ::atan2; +using ::atanh; +using ::cbrt; +using ::ceil; +using ::copysign; +using ::cos; +using ::cosh; +using ::erf; +using ::erfc; +using ::exp; +using ::exp2; +using ::expm1; +using ::fabs; +using ::fdim; +using ::floor; +using ::fma; +using ::fmax; +using ::fmin; +using ::fmod; +using ::fpclassify; +using ::frexp; +using ::hypot; +using ::ilogb; +using ::isfinite; +using ::isgreater; +using ::isgreaterequal; +using ::isinf; +using ::isless; +using ::islessequal; +using ::islessgreater; +using ::isnan; +using ::isnormal; +using ::isunordered; +using ::labs; +using ::ldexp; +using ::lgamma; +using ::llabs; +using ::llrint; +using ::log; +using ::log10; +using ::log1p; +using ::log2; +using ::logb; +using ::lrint; +using ::lround; +using ::llround; +using ::modf; +using ::nan; +using ::nanf; +using ::nearbyint; +using ::nextafter; +using ::nexttoward; +using ::pow; +using ::remainder; +using ::remquo; +using ::rint; +using ::round; +using ::scalbln; +using ::scalbn; +using ::signbit; +using ::sin; +using ::sinh; +using ::sqrt; +using ::tan; +using ::tanh; +using ::tgamma; +using ::trunc; + +#ifdef _LIBCPP_END_NAMESPACE_STD +_LIBCPP_END_NAMESPACE_STD +#else +#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION +_GLIBCXX_END_NAMESPACE_VERSION +#endif +} // namespace std +#endif + +#pragma pop_macro("__DEVICE__") + +#endif diff --git a/c_headers/__clang_cuda_runtime_wrapper.h b/c_headers/__clang_cuda_runtime_wrapper.h new file mode 100644 index 000000000..931d44b69 --- /dev/null +++ b/c_headers/__clang_cuda_runtime_wrapper.h @@ -0,0 +1,347 @@ +/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +/* + * WARNING: This header is intended to be directly -include'd by + * the compiler and is not supposed to be included by users. + * + * CUDA headers are implemented in a way that currently makes it + * impossible for user code to #include directly when compiling with + * Clang. They present different view of CUDA-supplied functions + * depending on where in NVCC's compilation pipeline the headers are + * included. 
Neither of these modes provides function definitions with + * correct attributes, so we use preprocessor to force the headers + * into a form that Clang can use. + * + * Similarly to NVCC which -include's cuda_runtime.h, Clang -include's + * this file during every CUDA compilation. + */ + +#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__ +#define __CLANG_CUDA_RUNTIME_WRAPPER_H__ + +#if defined(__CUDA__) && defined(__clang__) + +// Include some forward declares that must come before cmath. +#include <__clang_cuda_math_forward_declares.h> + +// Include some standard headers to avoid CUDA headers including them +// while some required macros (like __THROW) are in a weird state. +#include +#include +#include + +// Preserve common macros that will be changed below by us or by CUDA +// headers. +#pragma push_macro("__THROW") +#pragma push_macro("__CUDA_ARCH__") + +// WARNING: Preprocessor hacks below are based on specific details of +// CUDA-7.x headers and are not expected to work with any other +// version of CUDA headers. +#include "cuda.h" +#if !defined(CUDA_VERSION) +#error "cuda.h did not define CUDA_VERSION" +#elif CUDA_VERSION < 7000 || CUDA_VERSION > 8000 +#error "Unsupported CUDA version!" +#endif + +// Make largest subset of device functions available during host +// compilation -- SM_35 for the time being. +#ifndef __CUDA_ARCH__ +#define __CUDA_ARCH__ 350 +#endif + +#include "__clang_cuda_builtin_vars.h" + +// No need for device_launch_parameters.h as __clang_cuda_builtin_vars.h above +// has taken care of builtin variables declared in the file. +#define __DEVICE_LAUNCH_PARAMETERS_H__ + +// {math,device}_functions.h only have declarations of the +// functions. We don't need them as we're going to pull in their +// definitions from .hpp files. +#define __DEVICE_FUNCTIONS_H__ +#define __MATH_FUNCTIONS_H__ +#define __COMMON_FUNCTIONS_H__ + +#undef __CUDACC__ +#define __CUDABE__ +// Disables definitions of device-side runtime support stubs in +// cuda_device_runtime_api.h +#include "driver_types.h" +#include "host_config.h" +#include "host_defines.h" + +#undef __CUDABE__ +#define __CUDACC__ +#include "cuda_runtime.h" + +#undef __CUDACC__ +#define __CUDABE__ + +// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does +// not have at the moment. Emulate them with a builtin memcpy/memset. +#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n) +#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n) + +#include "crt/device_runtime.h" +#include "crt/host_runtime.h" +// device_runtime.h defines __cxa_* macros that will conflict with +// cxxabi.h. +// FIXME: redefine these as __device__ functions. +#undef __cxa_vec_ctor +#undef __cxa_vec_cctor +#undef __cxa_vec_dtor +#undef __cxa_vec_new +#undef __cxa_vec_new2 +#undef __cxa_vec_new3 +#undef __cxa_vec_delete2 +#undef __cxa_vec_delete +#undef __cxa_vec_delete3 +#undef __cxa_pure_virtual + +// math_functions.hpp expects this host function be defined on MacOS, but it +// ends up not being there because of the games we play here. Just define it +// ourselves; it's simple enough. +#ifdef __APPLE__ +inline __host__ double __signbitd(double x) { + return std::signbit(x); +} +#endif + +// We need decls for functions in CUDA's libdevice with __device__ +// attribute only. Alas they come either as __host__ __device__ or +// with no attributes at all. To work around that, define __CUDA_RTC__ +// which produces HD variant and undef __host__ which gives us desided +// decls with __device__ attribute. 
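/* Editor's sketch (illustrative, not from the CUDA headers): what the __host__
 * trick described above accomplishes. With __host__ temporarily defined to
 * nothing, a declaration the headers spell as host+device comes out
 * device-only, which is the form wanted here; the declaration name below is
 * made up. The real sequence follows next in the wrapper. */
#pragma push_macro("__host__")
#define __host__                                        /* expands to nothing */
__host__ __device__ double __illustrative_decl(double); /* effectively __device__ only */
#pragma pop_macro("__host__")                           /* restore the real __host__ */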
+#pragma push_macro("__host__") +#define __host__ +#define __CUDACC_RTC__ +#include "device_functions_decls.h" +#undef __CUDACC_RTC__ + +// Temporarily poison __host__ macro to ensure it's not used by any of +// the headers we're about to include. +#define __host__ UNEXPECTED_HOST_ATTRIBUTE + +// CUDA 8.0.41 relies on __USE_FAST_MATH__ and __CUDA_PREC_DIV's values. +// Previous versions used to check whether they are defined or not. +// CU_DEVICE_INVALID macro is only defined in 8.0.41, so we use it +// here to detect the switch. + +#if defined(CU_DEVICE_INVALID) +#if !defined(__USE_FAST_MATH__) +#define __USE_FAST_MATH__ 0 +#endif + +#if !defined(__CUDA_PREC_DIV) +#define __CUDA_PREC_DIV 0 +#endif +#endif + +// device_functions.hpp and math_functions*.hpp use 'static +// __forceinline__' (with no __device__) for definitions of device +// functions. Temporarily redefine __forceinline__ to include +// __device__. +#pragma push_macro("__forceinline__") +#define __forceinline__ __device__ __inline__ __attribute__((always_inline)) +#include "device_functions.hpp" + +// math_function.hpp uses the __USE_FAST_MATH__ macro to determine whether we +// get the slow-but-accurate or fast-but-inaccurate versions of functions like +// sin and exp. This is controlled in clang by -fcuda-approx-transcendentals. +// +// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs. +// slow divides), so we need to scope our define carefully here. +#pragma push_macro("__USE_FAST_MATH__") +#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__) +#define __USE_FAST_MATH__ 1 +#endif +#include "math_functions.hpp" +#pragma pop_macro("__USE_FAST_MATH__") + +#include "math_functions_dbl_ptx3.hpp" +#pragma pop_macro("__forceinline__") + +// Pull in host-only functions that are only available when neither +// __CUDACC__ nor __CUDABE__ are defined. +#undef __MATH_FUNCTIONS_HPP__ +#undef __CUDABE__ +#include "math_functions.hpp" +// Alas, additional overloads for these functions are hard to get to. +// Considering that we only need these overloads for a few functions, +// we can provide them here. +static inline float rsqrt(float __a) { return rsqrtf(__a); } +static inline float rcbrt(float __a) { return rcbrtf(__a); } +static inline float sinpi(float __a) { return sinpif(__a); } +static inline float cospi(float __a) { return cospif(__a); } +static inline void sincospi(float __a, float *__b, float *__c) { + return sincospif(__a, __b, __c); +} +static inline float erfcinv(float __a) { return erfcinvf(__a); } +static inline float normcdfinv(float __a) { return normcdfinvf(__a); } +static inline float normcdf(float __a) { return normcdff(__a); } +static inline float erfcx(float __a) { return erfcxf(__a); } + +// For some reason single-argument variant is not always declared by +// CUDA headers. Alas, device_functions.hpp included below needs it. +static inline __device__ void __brkpt(int __c) { __brkpt(); } + +// Now include *.hpp with definitions of various GPU functions. Alas, +// a lot of thins get declared/defined with __host__ attribute which +// we don't want and we have to define it out. We also have to include +// {device,math}_functions.hpp again in order to extract the other +// branch of #if/else inside. 
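/* Editor's sketch of the re-include pattern used below: knock out a header's
 * include guard and flip the controlling macro, so a second #include picks up
 * the other branch of an #if/#else inside it. The file name and guard macro
 * here are hypothetical.
 *
 *   illustrative_functions.hpp:
 *     #ifndef __ILLUSTRATIVE_FUNCTIONS_HPP__
 *     #define __ILLUSTRATIVE_FUNCTIONS_HPP__
 *     #if defined(__CUDABE__)
 *       ... device-build branch ...
 *     #else
 *       ... host/runtime branch ...
 *     #endif
 *     #endif
 */
#define __CUDABE__
#include "illustrative_functions.hpp" /* first pass: __CUDABE__ branch */

#undef __ILLUSTRATIVE_FUNCTIONS_HPP__ /* defeat the include guard */
#undef __CUDABE__
#define __CUDACC__
#include "illustrative_functions.hpp" /* second pass: the other branch */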
+ +#define __host__ +#undef __CUDABE__ +#define __CUDACC__ +#undef __DEVICE_FUNCTIONS_HPP__ +#include "device_atomic_functions.hpp" +#include "device_functions.hpp" +#include "sm_20_atomic_functions.hpp" +#include "sm_20_intrinsics.hpp" +#include "sm_32_atomic_functions.hpp" + +// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h. These define the +// __shfl and __ldg intrinsics using inline (volatile) asm, but we want to +// define them using builtins so that the optimizer can reason about and across +// these instructions. In particular, using intrinsics for ldg gets us the +// [addr+imm] addressing mode, which, although it doesn't actually exist in the +// hardware, seems to generate faster machine code because ptxas can more easily +// reason about our code. + +#if CUDA_VERSION >= 8000 +#include "sm_60_atomic_functions.hpp" +#include "sm_61_intrinsics.hpp" +#endif + +#undef __MATH_FUNCTIONS_HPP__ + +// math_functions.hpp defines ::signbit as a __host__ __device__ function. This +// conflicts with libstdc++'s constexpr ::signbit, so we have to rename +// math_function.hpp's ::signbit. It's guarded by #undef signbit, but that's +// conditional on __GNUC__. :) +#pragma push_macro("signbit") +#pragma push_macro("__GNUC__") +#undef __GNUC__ +#define signbit __ignored_cuda_signbit +#include "math_functions.hpp" +#pragma pop_macro("__GNUC__") +#pragma pop_macro("signbit") + +#pragma pop_macro("__host__") + +#include "texture_indirect_functions.h" + +// Restore state of __CUDA_ARCH__ and __THROW we had on entry. +#pragma pop_macro("__CUDA_ARCH__") +#pragma pop_macro("__THROW") + +// Set up compiler macros expected to be seen during compilation. +#undef __CUDABE__ +#define __CUDACC__ + +extern "C" { +// Device-side CUDA system calls. +// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls +// We need these declarations and wrappers for device-side +// malloc/free/printf calls to work without relying on +// -fcuda-disable-target-call-checks option. +__device__ int vprintf(const char *, const char *); +__device__ void free(void *) __attribute((nothrow)); +__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc)); +__device__ void __assertfail(const char *__message, const char *__file, + unsigned __line, const char *__function, + size_t __charSize) __attribute__((noreturn)); + +// In order for standard assert() macro on linux to work we need to +// provide device-side __assert_fail() +__device__ static inline void __assert_fail(const char *__message, + const char *__file, unsigned __line, + const char *__function) { + __assertfail(__message, __file, __line, __function, sizeof(char)); +} + +// Clang will convert printf into vprintf, but we still need +// device-side declaration for it. +__device__ int printf(const char *, ...); +} // extern "C" + +// We also need device-side std::malloc and std::free. +namespace std { +__device__ static inline void free(void *__ptr) { ::free(__ptr); } +__device__ static inline void *malloc(size_t __size) { + return ::malloc(__size); +} +} // namespace std + +// Out-of-line implementations from __clang_cuda_builtin_vars.h. These need to +// come after we've pulled in the definition of uint3 and dim3. 
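/* Editor's sketch: what the conversion operators defined just below enable --
 * the builtin variables can be handed around as ordinary uint3/dim3 values.
 * A minimal hypothetical kernel (assumes a CUDA compilation where uint3 and
 * dim3 are already declared): */
__global__ void example_capture_ids(uint3 *tids, dim3 *bdim) {
  tids[threadIdx.x] = threadIdx; /* uses __cuda_builtin_threadIdx_t::operator uint3() */
  if (threadIdx.x == 0)
    *bdim = blockDim;            /* uses __cuda_builtin_blockDim_t::operator dim3() */
}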
+ +__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const { + uint3 ret; + ret.x = x; + ret.y = y; + ret.z = z; + return ret; +} + +__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const { + uint3 ret; + ret.x = x; + ret.y = y; + ret.z = z; + return ret; +} + +__device__ inline __cuda_builtin_blockDim_t::operator dim3() const { + return dim3(x, y, z); +} + +__device__ inline __cuda_builtin_gridDim_t::operator dim3() const { + return dim3(x, y, z); +} + +#include <__clang_cuda_cmath.h> +#include <__clang_cuda_intrinsics.h> +#include <__clang_cuda_complex_builtins.h> + +// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host +// mode, giving them their "proper" types of dim3 and uint3. This is +// incompatible with the types we give in __clang_cuda_builtin_vars.h. As as +// hack, force-include the header (nvcc doesn't include it by default) but +// redefine dim3 and uint3 to our builtin types. (Thankfully dim3 and uint3 are +// only used here for the redeclarations of blockDim and threadIdx.) +#pragma push_macro("dim3") +#pragma push_macro("uint3") +#define dim3 __cuda_builtin_blockDim_t +#define uint3 __cuda_builtin_threadIdx_t +#include "curand_mtgp32_kernel.h" +#pragma pop_macro("dim3") +#pragma pop_macro("uint3") +#pragma pop_macro("__USE_FAST_MATH__") + +#endif // __CUDA__ +#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__ diff --git a/c_headers/__wmmintrin_aes.h b/c_headers/__wmmintrin_aes.h index 9f594ee56..3a2ee1b2e 100644 --- a/c_headers/__wmmintrin_aes.h +++ b/c_headers/__wmmintrin_aes.h @@ -25,48 +25,127 @@ #include -#if !defined (__AES__) -# error "AES instructions not enabled" -#else - /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes"))) +/// \brief Performs a single round of AES encryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESENC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the encrypted value. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_aesenc_si128(__m128i __V, __m128i __R) { - return (__m128i)__builtin_ia32_aesenc128(__V, __R); + return (__m128i)__builtin_ia32_aesenc128((__v2di)__V, (__v2di)__R); } +/// \brief Performs the final round of AES encryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESENCLAST instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the encrypted value. 
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_aesenclast_si128(__m128i __V, __m128i __R) { - return (__m128i)__builtin_ia32_aesenclast128(__V, __R); + return (__m128i)__builtin_ia32_aesenclast128((__v2di)__V, (__v2di)__R); } +/// \brief Performs a single round of AES decryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESDEC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the decrypted value. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_aesdec_si128(__m128i __V, __m128i __R) { - return (__m128i)__builtin_ia32_aesdec128(__V, __R); + return (__m128i)__builtin_ia32_aesdec128((__v2di)__V, (__v2di)__R); } +/// \brief Performs the final round of AES decryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESDECLAST instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the decrypted value. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_aesdeclast_si128(__m128i __V, __m128i __R) { - return (__m128i)__builtin_ia32_aesdeclast128(__V, __R); + return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__V, (__v2di)__R); } +/// \brief Applies the AES InvMixColumns() transformation to an expanded key +/// contained in the source operand, and writes the result to the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESIMC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the expanded key. +/// \returns A 128-bit integer vector containing the transformed value. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_aesimc_si128(__m128i __V) { - return (__m128i)__builtin_ia32_aesimc128(__V); + return (__m128i)__builtin_ia32_aesimc128((__v2di)__V); } +/// \brief Generates a round key for AES encyption, operating on 128-bit data +/// specified in the first source operand and using an 8-bit round constant +/// specified by the second source operand, and writes the result to the +/// destination. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_aeskeygenassist_si128(__m128i C, const int R); +/// \endcode +/// +/// This intrinsic corresponds to the AESKEYGENASSIST instruction. +/// +/// \param C +/// A 128-bit integer vector that is used to generate the AES encryption key. +/// \param R +/// An 8-bit round constant used to generate the AES encryption key. +/// \returns A 128-bit round key for AES encryption. 
#define _mm_aeskeygenassist_si128(C, R) \ - __builtin_ia32_aeskeygenassist128((C), (R)) + (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R)) #undef __DEFAULT_FN_ATTRS -#endif - #endif /* _WMMINTRIN_AES_H */ diff --git a/c_headers/__wmmintrin_pclmul.h b/c_headers/__wmmintrin_pclmul.h index 8d1f1b7c0..e9c6a9f6d 100644 --- a/c_headers/__wmmintrin_pclmul.h +++ b/c_headers/__wmmintrin_pclmul.h @@ -1,4 +1,4 @@ -/*===---- __wmmintrin_pclmul.h - AES intrinsics ----------------------------=== +/*===---- __wmmintrin_pclmul.h - PCMUL intrinsics ---------------------------=== * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -23,12 +23,35 @@ #ifndef _WMMINTRIN_PCLMUL_H #define _WMMINTRIN_PCLMUL_H -#if !defined (__PCLMUL__) -# error "PCLMUL instruction is not enabled" -#else +/// \brief Multiplies two 64-bit integer values, which are selected from source +/// operands using the immediate-value operand. The multiplication is a +/// carry-less multiplication, and the 128-bit integer product is stored in +/// the destination. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I); +/// \endcode +/// +/// This intrinsic corresponds to the VPCLMULQDQ instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x i64] containing one of the source operands. +/// \param __Y +/// A 128-bit vector of [2 x i64] containing one of the source operands. +/// \param __I +/// An immediate value specifying which 64-bit values to select from the +/// operands. Bit 0 is used to select a value from operand \a __X, and bit +/// 4 is used to select a value from operand \a __Y: \n +/// Bit[0]=0 indicates that bits[63:0] of operand \a __X are used. \n +/// Bit[0]=1 indicates that bits[127:64] of operand \a __X are used. \n +/// Bit[4]=0 indicates that bits[63:0] of operand \a __Y are used. \n +/// Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used. +/// \returns The 128-bit integer vector containing the result of the carry-less +/// multiplication of the selected 64-bit values. 
#define _mm_clmulepi64_si128(__X, __Y, __I) \ ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \ (__v2di)(__m128i)(__Y), (char)(__I))) -#endif #endif /* _WMMINTRIN_PCLMUL_H */ diff --git a/c_headers/adxintrin.h b/c_headers/adxintrin.h index b8eb9cbf6..ee3472841 100644 --- a/c_headers/adxintrin.h +++ b/c_headers/adxintrin.h @@ -32,8 +32,7 @@ #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) /* Intrinsics that are available only if __ADX__ defined */ -#ifdef __ADX__ -static __inline unsigned char __DEFAULT_FN_ATTRS +static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx"))) _addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y, unsigned int *__p) { @@ -41,14 +40,13 @@ _addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y, } #ifdef __x86_64__ -static __inline unsigned char __DEFAULT_FN_ATTRS +static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx"))) _addcarryx_u64(unsigned char __cf, unsigned long long __x, unsigned long long __y, unsigned long long *__p) { return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); } #endif -#endif /* Intrinsics that are also available if __ADX__ undefined */ static __inline unsigned char __DEFAULT_FN_ATTRS diff --git a/c_headers/altivec.h b/c_headers/altivec.h new file mode 100644 index 000000000..a01d9d837 --- /dev/null +++ b/c_headers/altivec.h @@ -0,0 +1,16733 @@ +/*===---- altivec.h - Standard header for type generic math ---------------===*\ + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * +\*===----------------------------------------------------------------------===*/ + +#ifndef __ALTIVEC_H +#define __ALTIVEC_H + +#ifndef __ALTIVEC__ +#error "AltiVec support not enabled" +#endif + +/* Constants for mapping CR6 bits to predicate result. 
*/ + +#define __CR6_EQ 0 +#define __CR6_EQ_REV 1 +#define __CR6_LT 2 +#define __CR6_LT_REV 3 + +/* Constants for vec_test_data_class */ +#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0) +#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 1) +#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \ + __VEC_CLASS_FP_SUBNORMAL_N) +#define __VEC_CLASS_FP_ZERO_N (1<<2) +#define __VEC_CLASS_FP_ZERO_P (1<<3) +#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P | \ + __VEC_CLASS_FP_ZERO_N) +#define __VEC_CLASS_FP_INFINITY_N (1<<4) +#define __VEC_CLASS_FP_INFINITY_P (1<<5) +#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P | \ + __VEC_CLASS_FP_INFINITY_N) +#define __VEC_CLASS_FP_NAN (1<<6) +#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN | \ + __VEC_CLASS_FP_SUBNORMAL | \ + __VEC_CLASS_FP_ZERO | \ + __VEC_CLASS_FP_INFINITY) + +#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__)) + +#ifdef __POWER9_VECTOR__ +#include +#endif + +static __inline__ vector signed char __ATTRS_o_ai vec_perm( + vector signed char __a, vector signed char __b, vector unsigned char __c); + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_perm(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c); + +static __inline__ vector bool char __ATTRS_o_ai +vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c); + +static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a, + vector signed short __b, + vector unsigned char __c); + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_perm(vector unsigned short __a, vector unsigned short __b, + vector unsigned char __c); + +static __inline__ vector bool short __ATTRS_o_ai vec_perm( + vector bool short __a, vector bool short __b, vector unsigned char __c); + +static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, + vector pixel __b, + vector unsigned char __c); + +static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a, + vector signed int __b, + vector unsigned char __c); + +static __inline__ vector unsigned int __ATTRS_o_ai vec_perm( + vector unsigned int __a, vector unsigned int __b, vector unsigned char __c); + +static __inline__ vector bool int __ATTRS_o_ai +vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c); + +static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a, + vector float __b, + vector unsigned char __c); + +#ifdef __VSX__ +static __inline__ vector long long __ATTRS_o_ai +vec_perm(vector signed long long __a, vector signed long long __b, + vector unsigned char __c); + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_perm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned char __c); + +static __inline__ vector bool long long __ATTRS_o_ai +vec_perm(vector bool long long __a, vector bool long long __b, + vector unsigned char __c); + +static __inline__ vector double __ATTRS_o_ai vec_perm(vector double __a, + vector double __b, + vector unsigned char __c); +#endif + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xor(vector unsigned char __a, vector unsigned char __b); + +/* vec_abs */ + +#define __builtin_altivec_abs_v16qi vec_abs +#define __builtin_altivec_abs_v8hi vec_abs +#define __builtin_altivec_abs_v4si vec_abs + +static __inline__ vector signed char __ATTRS_o_ai +vec_abs(vector signed char __a) { + return __builtin_altivec_vmaxsb(__a, -__a); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_abs(vector signed short __a) { + 
return __builtin_altivec_vmaxsh(__a, -__a); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_abs(vector signed int __a) { + return __builtin_altivec_vmaxsw(__a, -__a); +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed long long __ATTRS_o_ai +vec_abs(vector signed long long __a) { + return __builtin_altivec_vmaxsd(__a, -__a); +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvabssp(__a); +#else + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF); + return (vector float)__res; +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) { + return __builtin_vsx_xvabsdp(__a); +} +#endif + +/* vec_abss */ +#define __builtin_altivec_abss_v16qi vec_abss +#define __builtin_altivec_abss_v8hi vec_abss +#define __builtin_altivec_abss_v4si vec_abss + +static __inline__ vector signed char __ATTRS_o_ai +vec_abss(vector signed char __a) { + return __builtin_altivec_vmaxsb( + __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a)); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_abss(vector signed short __a) { + return __builtin_altivec_vmaxsh( + __a, __builtin_altivec_vsubshs((vector signed short)(0), __a)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_abss(vector signed int __a) { + return __builtin_altivec_vmaxsw( + __a, __builtin_altivec_vsubsws((vector signed int)(0), __a)); +} + +/* vec_absd */ +#if defined(__POWER9_VECTOR__) + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_absd(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vabsdub(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_absd(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vabsduh(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_absd(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vabsduw(__a, __b); +} + +#endif /* End __POWER9_VECTOR__ */ + +/* vec_add */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_add(vector signed char __a, vector signed char __b) { + return __a + __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_add(vector bool char __a, vector signed char __b) { + return (vector signed char)__a + __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_add(vector signed char __a, vector bool char __b) { + return __a + (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_add(vector unsigned char __a, vector unsigned char __b) { + return __a + __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_add(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a + __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_add(vector unsigned char __a, vector bool char __b) { + return __a + (vector unsigned char)__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a, + vector short __b) { + return __a + __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_add(vector bool short __a, + vector short __b) { + return (vector short)__a + __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a, + vector bool short __b) { + return __a + (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_add(vector unsigned short __a, vector unsigned short 
__b) { + return __a + __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_add(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a + __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_add(vector unsigned short __a, vector bool short __b) { + return __a + (vector unsigned short)__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a, + vector int __b) { + return __a + __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_add(vector bool int __a, + vector int __b) { + return (vector int)__a + __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a, + vector bool int __b) { + return __a + (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_add(vector unsigned int __a, vector unsigned int __b) { + return __a + __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_add(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a + __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_add(vector unsigned int __a, vector bool int __b) { + return __a + (vector unsigned int)__b; +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed long long __ATTRS_o_ai +vec_add(vector signed long long __a, vector signed long long __b) { + return __a + __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_add(vector unsigned long long __a, vector unsigned long long __b) { + return __a + __b; +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_add(vector signed __int128 __a, vector signed __int128 __b) { + return __a + __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a + __b; +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + +static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a, + vector float __b) { + return __a + __b; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a, + vector double __b) { + return __a + __b; +} +#endif // __VSX__ + +/* vec_adde */ + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_adde(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} +#endif + +static __inline__ vector signed int __ATTRS_o_ai +vec_adde(vector signed int __a, vector signed int __b, + vector signed int __c) { + vector signed int __mask = {1, 1, 1, 1}; + vector signed int __carry = __c & __mask; + return vec_add(vec_add(__a, __b), __carry); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_adde(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + vector unsigned int __mask = {1, 1, 1, 1}; + vector unsigned int __carry = __c & __mask; + return vec_add(vec_add(__a, __b), __carry); +} + +/* vec_addec */ + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_addec(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} + +static __inline__ 
vector unsigned __int128 __ATTRS_o_ai +vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_addec(vector signed int __a, vector signed int __b, + vector signed int __c) { + + signed int __result[4]; + for (int i = 0; i < 4; i++) { + unsigned int __tempa = (unsigned int) __a[i]; + unsigned int __tempb = (unsigned int) __b[i]; + unsigned int __tempc = (unsigned int) __c[i]; + __tempc = __tempc & 0x00000001; + unsigned long long __longa = (unsigned long long) __tempa; + unsigned long long __longb = (unsigned long long) __tempb; + unsigned long long __longc = (unsigned long long) __tempc; + unsigned long long __sum = __longa + __longb + __longc; + unsigned long long __res = (__sum >> 32) & 0x01; + unsigned long long __tempres = (unsigned int) __res; + __result[i] = (signed int) __tempres; + } + + vector signed int ret = { __result[0], __result[1], __result[2], __result[3] }; + return ret; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_addec(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + + unsigned int __result[4]; + for (int i = 0; i < 4; i++) { + unsigned int __tempc = __c[i] & 1; + unsigned long long __longa = (unsigned long long) __a[i]; + unsigned long long __longb = (unsigned long long) __b[i]; + unsigned long long __longc = (unsigned long long) __tempc; + unsigned long long __sum = __longa + __longb + __longc; + unsigned long long __res = (__sum >> 32) & 0x01; + unsigned long long __tempres = (unsigned int) __res; + __result[i] = (signed int) __tempres; + } + + vector unsigned int ret = { __result[0], __result[1], __result[2], __result[3] }; + return ret; +} + +#endif + +/* vec_vaddubm */ + +#define __builtin_altivec_vaddubm vec_vaddubm + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddubm(vector signed char __a, vector signed char __b) { + return __a + __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddubm(vector bool char __a, vector signed char __b) { + return (vector signed char)__a + __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddubm(vector signed char __a, vector bool char __b) { + return __a + (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubm(vector unsigned char __a, vector unsigned char __b) { + return __a + __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubm(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a + __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubm(vector unsigned char __a, vector bool char __b) { + return __a + (vector unsigned char)__b; +} + +/* vec_vadduhm */ + +#define __builtin_altivec_vadduhm vec_vadduhm + +static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a, + vector short __b) { + return __a + __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a, + vector short __b) { + return (vector short)__a + __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a, + vector bool short __b) { + return __a + (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhm(vector unsigned short __a, vector unsigned short __b) { + return __a + __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhm(vector bool short __a, vector unsigned short __b) { + return 
(vector unsigned short)__a + __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhm(vector unsigned short __a, vector bool short __b) { + return __a + (vector unsigned short)__b; +} + +/* vec_vadduwm */ + +#define __builtin_altivec_vadduwm vec_vadduwm + +static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a, + vector int __b) { + return __a + __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a, + vector int __b) { + return (vector int)__a + __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a, + vector bool int __b) { + return __a + (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduwm(vector unsigned int __a, vector unsigned int __b) { + return __a + __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduwm(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a + __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduwm(vector unsigned int __a, vector bool int __b) { + return __a + (vector unsigned int)__b; +} + +/* vec_vaddfp */ + +#define __builtin_altivec_vaddfp vec_vaddfp + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vaddfp(vector float __a, vector float __b) { + return __a + __b; +} + +/* vec_addc */ + +static __inline__ vector signed int __ATTRS_o_ai +vec_addc(vector signed int __a, vector signed int __b) { + return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_addc(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vaddcuw(__a, __b); +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_addc(vector signed __int128 __a, vector signed __int128 __b) { + return (vector signed __int128)__builtin_altivec_vaddcuq( + (vector unsigned __int128)__a, (vector unsigned __int128)__b); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vaddcuq(__a, __b); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + +/* vec_vaddcuw */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vaddcuw(__a, __b); +} + +/* vec_adds */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_adds(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vaddsbs(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_adds(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vaddsbs((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_adds(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vaddsbs(__a, (vector signed char)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_adds(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vaddubs(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_adds(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vaddubs((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_adds(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vaddubs(__a, (vector 
unsigned char)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a, + vector short __b) { + return __builtin_altivec_vaddshs(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_adds(vector bool short __a, + vector short __b) { + return __builtin_altivec_vaddshs((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a, + vector bool short __b) { + return __builtin_altivec_vaddshs(__a, (vector short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_adds(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vadduhs(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_adds(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vadduhs((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_adds(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a, + vector int __b) { + return __builtin_altivec_vaddsws(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_adds(vector bool int __a, + vector int __b) { + return __builtin_altivec_vaddsws((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a, + vector bool int __b) { + return __builtin_altivec_vaddsws(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_adds(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vadduws(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_adds(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vadduws((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_adds(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vadduws(__a, (vector unsigned int)__b); +} + +/* vec_vaddsbs */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddsbs(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vaddsbs(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddsbs(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vaddsbs((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vaddsbs(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vaddsbs(__a, (vector signed char)__b); +} + +/* vec_vaddubs */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubs(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vaddubs(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubs(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vaddubs((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vaddubs(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b); +} + +/* vec_vaddshs */ + +static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a, + vector short __b) { + return __builtin_altivec_vaddshs(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a, + vector short __b) { + return __builtin_altivec_vaddshs((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector 
short __a, + vector bool short __b) { + return __builtin_altivec_vaddshs(__a, (vector short)__b); +} + +/* vec_vadduhs */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhs(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vadduhs(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhs(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vadduhs((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vadduhs(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b); +} + +/* vec_vaddsws */ + +static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a, + vector int __b) { + return __builtin_altivec_vaddsws(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a, + vector int __b) { + return __builtin_altivec_vaddsws((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a, + vector bool int __b) { + return __builtin_altivec_vaddsws(__a, (vector int)__b); +} + +/* vec_vadduws */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduws(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vadduws(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduws(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vadduws((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vadduws(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vadduws(__a, (vector unsigned int)__b); +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +/* vec_vadduqm */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) { + return __a + __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a + __b; +} + +/* vec_vaddeuqm */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} + +/* vec_vaddcuq */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vaddcuq(__a, __b); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vaddcuq(__a, __b); +} + +/* vec_vaddecuq */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + +/* vec_and */ + +#define __builtin_altivec_vand vec_and + +static __inline__ 
vector signed char __ATTRS_o_ai +vec_and(vector signed char __a, vector signed char __b) { + return __a & __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_and(vector bool char __a, vector signed char __b) { + return (vector signed char)__a & __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_and(vector signed char __a, vector bool char __b) { + return __a & (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_and(vector unsigned char __a, vector unsigned char __b) { + return __a & __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_and(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a & __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_and(vector unsigned char __a, vector bool char __b) { + return __a & (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_and(vector bool char __a, + vector bool char __b) { + return __a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a, + vector short __b) { + return __a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_and(vector bool short __a, + vector short __b) { + return (vector short)__a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a, + vector bool short __b) { + return __a & (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_and(vector unsigned short __a, vector unsigned short __b) { + return __a & __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_and(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a & __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_and(vector unsigned short __a, vector bool short __b) { + return __a & (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_and(vector bool short __a, vector bool short __b) { + return __a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a, + vector int __b) { + return __a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_and(vector bool int __a, + vector int __b) { + return (vector int)__a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a, + vector bool int __b) { + return __a & (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_and(vector unsigned int __a, vector unsigned int __b) { + return __a & __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_and(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a & __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_and(vector unsigned int __a, vector bool int __b) { + return __a & (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_and(vector bool int __a, + vector bool int __b) { + return __a & __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_and(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned 
int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_and(vector bool long long __a, + vector double __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & (vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai +vec_and(vector double __a, vector bool long long __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & (vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai vec_and(vector double __a, + vector double __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & (vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_and(vector signed long long __a, vector signed long long __b) { + return __a & __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_and(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a & __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_and(vector signed long long __a, vector bool long long __b) { + return __a & (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_and(vector unsigned long long __a, vector unsigned long long __b) { + return __a & __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_and(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a & __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_and(vector unsigned long long __a, vector bool long long __b) { + return __a & (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_and(vector bool long long __a, vector bool long long __b) { + return __a & __b; +} +#endif + +/* vec_vand */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vand(vector signed char __a, vector signed char __b) { + return __a & __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vand(vector bool char __a, vector signed char __b) { + return (vector signed char)__a & __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vand(vector signed char __a, vector bool char __b) { + return __a & (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vand(vector unsigned char __a, vector unsigned char __b) { + return __a & __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vand(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a & __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vand(vector unsigned char __a, vector bool char __b) { + return __a & (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vand(vector bool char __a, + vector bool char __b) { + return __a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a, + vector short __b) { + return __a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vand(vector bool short __a, + vector short __b) { + return (vector short)__a & __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a, + vector bool short __b) { + return __a & (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vand(vector unsigned short __a, vector unsigned short __b) { + return __a & __b; +} + +static 
__inline__ vector unsigned short __ATTRS_o_ai +vec_vand(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a & __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vand(vector unsigned short __a, vector bool short __b) { + return __a & (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vand(vector bool short __a, vector bool short __b) { + return __a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a, + vector int __b) { + return __a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vand(vector bool int __a, + vector int __b) { + return (vector int)__a & __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a, + vector bool int __b) { + return __a & (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vand(vector unsigned int __a, vector unsigned int __b) { + return __a & __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vand(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a & __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vand(vector unsigned int __a, vector bool int __b) { + return __a & (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vand(vector bool int __a, + vector bool int __b) { + return __a & __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vand(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a & (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_vand(vector signed long long __a, vector signed long long __b) { + return __a & __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vand(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a & __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vand(vector signed long long __a, vector bool long long __b) { + return __a & (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vand(vector unsigned long long __a, vector unsigned long long __b) { + return __a & __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vand(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a & __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vand(vector unsigned long long __a, vector bool long long __b) { + return __a & (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vand(vector bool long long __a, vector bool long long __b) { + return __a & __b; +} +#endif + +/* vec_andc */ + +#define __builtin_altivec_vandc vec_andc + +static __inline__ vector signed char __ATTRS_o_ai +vec_andc(vector signed char __a, vector signed char __b) { + return __a & ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_andc(vector bool char __a, vector signed char __b) { + return 
(vector signed char)__a & ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_andc(vector signed char __a, vector bool char __b) { + return __a & ~(vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_andc(vector unsigned char __a, vector unsigned char __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_andc(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a & ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_andc(vector unsigned char __a, vector bool char __b) { + return __a & ~(vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_andc(vector bool char __a, + vector bool char __b) { + return __a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a, + vector short __b) { + return __a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_andc(vector bool short __a, + vector short __b) { + return (vector short)__a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a, + vector bool short __b) { + return __a & ~(vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_andc(vector unsigned short __a, vector unsigned short __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_andc(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a & ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_andc(vector unsigned short __a, vector bool short __b) { + return __a & ~(vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_andc(vector bool short __a, vector bool short __b) { + return __a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a, + vector int __b) { + return __a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_andc(vector bool int __a, + vector int __b) { + return (vector int)__a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a, + vector bool int __b) { + return __a & ~(vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_andc(vector unsigned int __a, vector unsigned int __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_andc(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a & ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_andc(vector unsigned int __a, vector bool int __b) { + return __a & ~(vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_andc(vector bool int __a, + vector bool int __b) { + return __a & ~__b; +} + +static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_andc(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_andc(vector bool long long __a, + vector double __b) { + vector unsigned long 
long __res = + (vector unsigned long long)__a & ~(vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai +vec_andc(vector double __a, vector bool long long __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & ~(vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai vec_andc(vector double __a, + vector double __b) { + vector unsigned long long __res = + (vector unsigned long long)__a & ~(vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_andc(vector signed long long __a, vector signed long long __b) { + return __a & ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_andc(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a & ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_andc(vector signed long long __a, vector bool long long __b) { + return __a & ~(vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_andc(vector unsigned long long __a, vector unsigned long long __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_andc(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a & ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_andc(vector unsigned long long __a, vector bool long long __b) { + return __a & ~(vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_andc(vector bool long long __a, vector bool long long __b) { + return __a & ~__b; +} +#endif + +/* vec_vandc */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vandc(vector signed char __a, vector signed char __b) { + return __a & ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vandc(vector bool char __a, vector signed char __b) { + return (vector signed char)__a & ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vandc(vector signed char __a, vector bool char __b) { + return __a & ~(vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vandc(vector unsigned char __a, vector unsigned char __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vandc(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a & ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vandc(vector unsigned char __a, vector bool char __b) { + return __a & ~(vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vandc(vector bool char __a, vector bool char __b) { + return __a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a, + vector short __b) { + return __a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vandc(vector bool short __a, + vector short __b) { + return (vector short)__a & ~__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a, + vector bool short __b) { + return __a & ~(vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vandc(vector unsigned short __a, vector unsigned short __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vandc(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a & ~__b; +} 
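vec_andc computes a & ~b, which makes it the natural way to clear selected bits. A small sketch, not part of the header itself, assuming AltiVec is enabled and using vec_splats from elsewhere in altivec.h (the helper name vabs is invented): clearing the IEEE sign bit gives a branch-free per-lane absolute value.

    #include <altivec.h>

    static vector float vabs(vector float x) {
      /* 0x80000000 in every 32-bit lane, reinterpreted as float */
      const vector float sign = (vector float)vec_splats(0x80000000u);
      return vec_andc(x, sign);   /* x & ~sign  ==  |x| per lane */
    }
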
+ +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vandc(vector unsigned short __a, vector bool short __b) { + return __a & ~(vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vandc(vector bool short __a, vector bool short __b) { + return __a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a, + vector int __b) { + return __a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vandc(vector bool int __a, + vector int __b) { + return (vector int)__a & ~__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a, + vector bool int __b) { + return __a & ~(vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vandc(vector unsigned int __a, vector unsigned int __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vandc(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a & ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vandc(vector unsigned int __a, vector bool int __b) { + return __a & ~(vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a, + vector bool int __b) { + return __a & ~__b; +} + +static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vandc(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a & ~(vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_vandc(vector signed long long __a, vector signed long long __b) { + return __a & ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vandc(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a & ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vandc(vector signed long long __a, vector bool long long __b) { + return __a & ~(vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vandc(vector unsigned long long __a, vector unsigned long long __b) { + return __a & ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vandc(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a & ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vandc(vector unsigned long long __a, vector bool long long __b) { + return __a & ~(vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vandc(vector bool long long __a, vector bool long long __b) { + return __a & ~__b; +} +#endif + +/* vec_avg */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_avg(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vavgsb(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_avg(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vavgub(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_avg(vector short __a, + vector short 
__b) { + return __builtin_altivec_vavgsh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_avg(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vavguh(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_avg(vector int __a, + vector int __b) { + return __builtin_altivec_vavgsw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_avg(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vavguw(__a, __b); +} + +/* vec_vavgsb */ + +static __inline__ vector signed char __attribute__((__always_inline__)) +vec_vavgsb(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vavgsb(__a, __b); +} + +/* vec_vavgub */ + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_vavgub(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vavgub(__a, __b); +} + +/* vec_vavgsh */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_vavgsh(vector short __a, vector short __b) { + return __builtin_altivec_vavgsh(__a, __b); +} + +/* vec_vavguh */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_vavguh(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vavguh(__a, __b); +} + +/* vec_vavgsw */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vavgsw(vector int __a, vector int __b) { + return __builtin_altivec_vavgsw(__a, __b); +} + +/* vec_vavguw */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vavguw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vavguw(__a, __b); +} + +/* vec_ceil */ + +static __inline__ vector float __ATTRS_o_ai vec_ceil(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvrspip(__a); +#else + return __builtin_altivec_vrfip(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) { + return __builtin_vsx_xvrdpip(__a); +} +#endif + +/* vec_vrfip */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrfip(vector float __a) { + return __builtin_altivec_vrfip(__a); +} + +/* vec_cmpb */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_cmpb(vector float __a, vector float __b) { + return __builtin_altivec_vcmpbfp(__a, __b); +} + +/* vec_vcmpbfp */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vcmpbfp(vector float __a, vector float __b) { + return __builtin_altivec_vcmpbfp(__a, __b); +} + +/* vec_cmpeq */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpeq(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpeq(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpeq(vector bool char __a, vector bool char __b) { + return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a, + vector short __b) { + return (vector bool short)__builtin_altivec_vcmpequh(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpeq(vector unsigned short __a, vector unsigned short __b) { + return 
(vector bool short)__builtin_altivec_vcmpequh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpeq(vector bool short __a, vector bool short __b) { + return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a, + vector int __b) { + return (vector bool int)__builtin_altivec_vcmpequw(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpeq(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector bool int __a, + vector bool int __b) { + return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a, + (vector int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)__builtin_altivec_vcmpequd( + (vector long long)__a, (vector long long)__b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector bool long long __a, vector bool long long __b) { + return (vector bool long long)__builtin_altivec_vcmpequd( + (vector long long)__a, (vector long long)__b); +} + +#endif + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a, + vector float __b) { +#ifdef __VSX__ + return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b); +#else + return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpeq(vector double __a, vector double __b) { + return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b); +} +#endif + +#ifdef __POWER9_VECTOR__ +/* vec_cmpne */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector bool char __a, vector bool char __b) { + return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpne(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector bool short __a, vector bool short __b) { + return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector signed short __a, vector signed short __b) { + return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpne(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector bool int __a, vector bool int __b) { + return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a, + (vector int)__b); +} + +static 
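The vec_cmpeq overloads return a vector bool mask (all-ones or all-zeros per lane) rather than a scalar, so results are usually combined with the bitwise operations earlier in this hunk. A sketch under that assumption (keep_equal is an invented name):

    #include <altivec.h>

    /* Keep the lanes of a that equal b, zero the rest, using the
       (vector bool int, vector int) overload of vec_and. */
    static vector signed int keep_equal(vector signed int a, vector signed int b) {
      vector bool int eq = vec_cmpeq(a, b);
      return vec_and(eq, a);
    }
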
__inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector signed int __a, vector signed int __b) { + return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector bool long long __a, vector bool long long __b) { + return (vector bool long long) + ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector signed long long __a, vector signed long long __b) { + return (vector bool long long) + ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long) + ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpne(vector float __a, vector float __b) { + return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpne(vector double __a, vector double __b) { + return (vector bool long long) + ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b)); +} + +/* vec_cmpnez */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpnez(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpnez(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a, + (vector char)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpnez(vector signed short __a, vector signed short __b) { + return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpnez(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a, + (vector short)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpnez(vector signed int __a, vector signed int __b) { + return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpnez(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a, + (vector int)__b); +} + +static __inline__ signed int __ATTRS_o_ai +vec_cntlz_lsbb(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vctzlsbb(__a); +#else + return __builtin_altivec_vclzlsbb(__a); +#endif +} + +static __inline__ signed int __ATTRS_o_ai +vec_cntlz_lsbb(vector unsigned char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vctzlsbb(__a); +#else + return __builtin_altivec_vclzlsbb(__a); +#endif +} + +static __inline__ signed int __ATTRS_o_ai +vec_cnttz_lsbb(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vclzlsbb(__a); +#else + return __builtin_altivec_vctzlsbb(__a); +#endif +} + +static __inline__ signed int __ATTRS_o_ai +vec_cnttz_lsbb(vector unsigned char __a) 
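The *_lsbb helpers reduce a byte-compare mask to a scalar element count, and the __LITTLE_ENDIAN__ branches make "leading" mean lowest element index on either endianness. If that reading is right, a POWER9-only sketch for locating the first NUL byte in a 16-byte block would be (nul_index is an invented name; vec_splats comes from elsewhere in this header):

    #include <altivec.h>

    static int nul_index(vector unsigned char block) {
      vector bool char is_nul = vec_cmpeq(block, vec_splats((unsigned char)0));
      /* counts the leading bytes whose least-significant bit is 0, i.e. the
         element index of the first all-ones lane, or 16 if there is none */
      return vec_cntlz_lsbb((vector unsigned char)is_nul);
    }
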
{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vclzlsbb(__a); +#else + return __builtin_altivec_vctzlsbb(__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_parity_lsbb(vector unsigned int __a) { + return __builtin_altivec_vprtybw(__a); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_parity_lsbb(vector signed int __a) { + return __builtin_altivec_vprtybw(__a); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_parity_lsbb(vector unsigned __int128 __a) { + return __builtin_altivec_vprtybq(__a); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_parity_lsbb(vector signed __int128 __a) { + return __builtin_altivec_vprtybq(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_parity_lsbb(vector unsigned long long __a) { + return __builtin_altivec_vprtybd(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_parity_lsbb(vector signed long long __a) { + return __builtin_altivec_vprtybd(__a); +} + +#endif + +/* vec_cmpgt */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpgt(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpgt(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a, + vector short __b) { + return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpgt(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a, + vector int __b) { + return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpgt(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpgt(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b); +} +#endif + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b); +#else + return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpgt(vector double __a, vector double __b) { + return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b); +} +#endif + +/* vec_cmpge */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpge(vector signed char __a, vector signed char __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmpge(vector unsigned char __a, vector unsigned char __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpge(vector signed short __a, vector signed short __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmpge(vector 
unsigned short __a, vector unsigned short __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpge(vector signed int __a, vector signed int __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmpge(vector unsigned int __a, vector unsigned int __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmpge(vector float __a, + vector float __b) { +#ifdef __VSX__ + return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b); +#else + return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpge(vector double __a, vector double __b) { + return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b); +} +#endif + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpge(vector signed long long __a, vector signed long long __b) { + return ~(vec_cmpgt(__b, __a)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) { + return ~(vec_cmpgt(__b, __a)); +} +#endif + +/* vec_vcmpgefp */ + +static __inline__ vector bool int __attribute__((__always_inline__)) +vec_vcmpgefp(vector float __a, vector float __b) { + return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b); +} + +/* vec_vcmpgtsb */ + +static __inline__ vector bool char __attribute__((__always_inline__)) +vec_vcmpgtsb(vector signed char __a, vector signed char __b) { + return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b); +} + +/* vec_vcmpgtub */ + +static __inline__ vector bool char __attribute__((__always_inline__)) +vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b); +} + +/* vec_vcmpgtsh */ + +static __inline__ vector bool short __attribute__((__always_inline__)) +vec_vcmpgtsh(vector short __a, vector short __b) { + return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b); +} + +/* vec_vcmpgtuh */ + +static __inline__ vector bool short __attribute__((__always_inline__)) +vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b); +} + +/* vec_vcmpgtsw */ + +static __inline__ vector bool int __attribute__((__always_inline__)) +vec_vcmpgtsw(vector int __a, vector int __b) { + return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b); +} + +/* vec_vcmpgtuw */ + +static __inline__ vector bool int __attribute__((__always_inline__)) +vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b); +} + +/* vec_vcmpgtfp */ + +static __inline__ vector bool int __attribute__((__always_inline__)) +vec_vcmpgtfp(vector float __a, vector float __b) { + return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b); +} + +/* vec_cmple */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmple(vector signed char __a, vector signed char __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmple(vector unsigned char __a, vector unsigned char __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmple(vector signed short __a, vector signed short __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmple(vector unsigned short __a, vector unsigned short __b) { 
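vec_cmpge, vec_cmple and vec_cmplt are all expressed in terms of vec_cmpgt by swapping or negating arguments, so they compose freely with the bitwise mask operations. A sketch of a per-lane range check under the same AltiVec assumptions as above (in_range is an invented name):

    #include <altivec.h>

    /* All-ones in every lane where lo <= x <= hi, all-zeros elsewhere. */
    static vector bool int in_range(vector float x, vector float lo,
                                    vector float hi) {
      return vec_and(vec_cmpge(x, lo), vec_cmple(x, hi));
    }
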
+ return vec_cmpge(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmple(vector signed int __a, vector signed int __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmple(vector unsigned int __a, vector unsigned int __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmple(vector float __a, + vector float __b) { + return vec_cmpge(__b, __a); +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmple(vector double __a, vector double __b) { + return vec_cmpge(__b, __a); +} +#endif + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmple(vector signed long long __a, vector signed long long __b) { + return vec_cmpge(__b, __a); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmple(vector unsigned long long __a, vector unsigned long long __b) { + return vec_cmpge(__b, __a); +} +#endif + +/* vec_cmplt */ + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmplt(vector signed char __a, vector signed char __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_cmplt(vector unsigned char __a, vector unsigned char __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_cmplt(vector short __a, + vector short __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_cmplt(vector unsigned short __a, vector unsigned short __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector int __a, + vector int __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_cmplt(vector unsigned int __a, vector unsigned int __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector float __a, + vector float __b) { + return vec_cmpgt(__b, __a); +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmplt(vector double __a, vector double __b) { + return vec_cmpgt(__b, __a); +} +#endif + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmplt(vector signed long long __a, vector signed long long __b) { + return vec_cmpgt(__b, __a); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) { + return vec_cmpgt(__b, __a); +} + +/* vec_popcnt */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_popcnt(vector signed char __a) { + return __builtin_altivec_vpopcntb(__a); +} +static __inline__ vector unsigned char __ATTRS_o_ai +vec_popcnt(vector unsigned char __a) { + return __builtin_altivec_vpopcntb(__a); +} +static __inline__ vector signed short __ATTRS_o_ai +vec_popcnt(vector signed short __a) { + return __builtin_altivec_vpopcnth(__a); +} +static __inline__ vector unsigned short __ATTRS_o_ai +vec_popcnt(vector unsigned short __a) { + return __builtin_altivec_vpopcnth(__a); +} +static __inline__ vector signed int __ATTRS_o_ai +vec_popcnt(vector signed int __a) { + return __builtin_altivec_vpopcntw(__a); +} +static __inline__ vector unsigned int __ATTRS_o_ai +vec_popcnt(vector unsigned int __a) { + return __builtin_altivec_vpopcntw(__a); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_popcnt(vector signed long long __a) { + return __builtin_altivec_vpopcntd(__a); +} +static __inline__ vector unsigned long long __ATTRS_o_ai 
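vec_popcnt and vec_cntlz (POWER8 and later, per the __POWER8_VECTOR__ guard) operate per lane. A sketch of a per-lane Hamming distance, assuming vec_xor from elsewhere in this header (hamming is an invented name):

    #include <altivec.h>

    static vector unsigned int hamming(vector unsigned int a,
                                       vector unsigned int b) {
      return vec_popcnt(vec_xor(a, b));   /* popcount of the differing bits */
    }
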
+vec_popcnt(vector unsigned long long __a) { + return __builtin_altivec_vpopcntd(__a); +} + +/* vec_cntlz */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_cntlz(vector signed char __a) { + return __builtin_altivec_vclzb(__a); +} +static __inline__ vector unsigned char __ATTRS_o_ai +vec_cntlz(vector unsigned char __a) { + return __builtin_altivec_vclzb(__a); +} +static __inline__ vector signed short __ATTRS_o_ai +vec_cntlz(vector signed short __a) { + return __builtin_altivec_vclzh(__a); +} +static __inline__ vector unsigned short __ATTRS_o_ai +vec_cntlz(vector unsigned short __a) { + return __builtin_altivec_vclzh(__a); +} +static __inline__ vector signed int __ATTRS_o_ai +vec_cntlz(vector signed int __a) { + return __builtin_altivec_vclzw(__a); +} +static __inline__ vector unsigned int __ATTRS_o_ai +vec_cntlz(vector unsigned int __a) { + return __builtin_altivec_vclzw(__a); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_cntlz(vector signed long long __a) { + return __builtin_altivec_vclzd(__a); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_cntlz(vector unsigned long long __a) { + return __builtin_altivec_vclzd(__a); +} +#endif + +#ifdef __POWER9_VECTOR__ + +/* vec_cnttz */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_cnttz(vector signed char __a) { + return __builtin_altivec_vctzb(__a); +} +static __inline__ vector unsigned char __ATTRS_o_ai +vec_cnttz(vector unsigned char __a) { + return __builtin_altivec_vctzb(__a); +} +static __inline__ vector signed short __ATTRS_o_ai +vec_cnttz(vector signed short __a) { + return __builtin_altivec_vctzh(__a); +} +static __inline__ vector unsigned short __ATTRS_o_ai +vec_cnttz(vector unsigned short __a) { + return __builtin_altivec_vctzh(__a); +} +static __inline__ vector signed int __ATTRS_o_ai +vec_cnttz(vector signed int __a) { + return __builtin_altivec_vctzw(__a); +} +static __inline__ vector unsigned int __ATTRS_o_ai +vec_cnttz(vector unsigned int __a) { + return __builtin_altivec_vctzw(__a); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_cnttz(vector signed long long __a) { + return __builtin_altivec_vctzd(__a); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_cnttz(vector unsigned long long __a) { + return __builtin_altivec_vctzd(__a); +} + +/* vec_first_match_index */ + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector signed char __a, vector signed char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector unsigned char __a, vector unsigned char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector signed short __a, vector signed short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 
4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector unsigned short __a, vector unsigned short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector signed int __a, vector signed int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_index(vector unsigned int __a, vector unsigned int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +/* vec_first_match_or_eos_index */ + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector signed char __a, vector signed char __b) { + /* Compare the result of the comparison of two vectors with either and OR the + result. Either the elements are equal or one will equal the comparison + result if either is zero. + */ + vector bool char __tmp1 = vec_cmpeq(__a, __b); + vector bool char __tmp2 = __tmp1 | + vec_cmpeq((vector signed char)__tmp1, __a) | + vec_cmpeq((vector signed char)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector unsigned char __a, + vector unsigned char __b) { + vector bool char __tmp1 = vec_cmpeq(__a, __b); + vector bool char __tmp2 = __tmp1 | + vec_cmpeq((vector unsigned char)__tmp1, __a) | + vec_cmpeq((vector unsigned char)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector signed short __a, vector signed short __b) { + vector bool short __tmp1 = vec_cmpeq(__a, __b); + vector bool short __tmp2 = __tmp1 | + vec_cmpeq((vector signed short)__tmp1, __a) | + vec_cmpeq((vector signed short)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector unsigned short __a, + vector unsigned short __b) { + vector bool short __tmp1 = vec_cmpeq(__a, __b); + vector bool short __tmp2 = __tmp1 | + vec_cmpeq((vector unsigned short)__tmp1, __a) | + vec_cmpeq((vector unsigned short)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned 
long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector signed int __a, vector signed int __b) { + vector bool int __tmp1 = vec_cmpeq(__a, __b); + vector bool int __tmp2 = __tmp1 | vec_cmpeq((vector signed int)__tmp1, __a) | + vec_cmpeq((vector signed int)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_match_or_eos_index(vector unsigned int __a, vector unsigned int __b) { + vector bool int __tmp1 = vec_cmpeq(__a, __b); + vector bool int __tmp2 = __tmp1 | + vec_cmpeq((vector unsigned int)__tmp1, __a) | + vec_cmpeq((vector unsigned int)__tmp1, __b); + + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)__tmp2); +#else + vec_cntlz((vector unsigned long long)__tmp2); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +/* vec_first_mismatch_index */ + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector signed char __a, vector signed char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector unsigned char __a, vector unsigned char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector signed short __a, vector signed short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector unsigned short __a, vector unsigned short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector signed int __a, vector signed int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_index(vector unsigned int __a, vector unsigned int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector 
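The vec_first_match_index family collapses a lane-wise comparison into a single element index by counting zero bits in the 64-bit halves of the mask; when nothing matches, both halves count 64 and the result equals the vector's element count. A POWER9-only usage sketch (first_equal_lane is an invented name):

    #include <altivec.h>

    /* Index (0..3) of the first lane where a and b agree, or 4 if none. */
    static unsigned first_equal_lane(vector signed int a, vector signed int b) {
      return vec_first_match_index(a, b);
    }
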
unsigned long long)vec_cmpne(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +/* vec_first_mismatch_or_eos_index */ + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector signed char __a, + vector signed char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector unsigned char __a, + vector unsigned char __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 3; + } + return __res[0] >> 3; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector signed short __a, + vector signed short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector unsigned short __a, + vector unsigned short __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 4; + } + return __res[0] >> 4; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector signed int __a, vector signed int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ unsigned __ATTRS_o_ai +vec_first_mismatch_or_eos_index(vector unsigned int __a, + vector unsigned int __b) { + vector unsigned long long __res = +#ifdef __LITTLE_ENDIAN__ + vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b)); +#else + vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b)); +#endif + if (__res[0] == 64) { + return (__res[1] + 64) >> 5; + } + return __res[0] >> 5; +} + +static __inline__ vector double __ATTRS_o_ai +vec_insert_exp(vector double __a, vector unsigned long long __b) { + return __builtin_vsx_xviexpdp((vector unsigned long long)__a,__b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_insert_exp(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_vsx_xviexpdp(__a,__b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_insert_exp(vector float __a, vector unsigned int __b) { + return __builtin_vsx_xviexpsp((vector unsigned int)__a,__b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_insert_exp(vector unsigned int __a, vector unsigned int __b) { + return __builtin_vsx_xviexpsp(__a,__b); +} + +#if defined(__powerpc64__) +static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a, + 
size_t __b) { + return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xl_len(unsigned char *__a, size_t __b) { + return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a, + size_t __b) { + return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xl_len(unsigned short *__a, size_t __b) { + return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a, + size_t __b) { + return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a, + size_t __b) { + return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) { + return (vector float)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_xl_len(signed __int128 *__a, size_t __b) { + return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_xl_len(unsigned __int128 *__a, size_t __b) { + return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_xl_len(signed long long *__a, size_t __b) { + return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xl_len(unsigned long long *__a, size_t __b) { + return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a, + size_t __b) { + return (vector double)__builtin_vsx_lxvl(__a, (__b << 56)); +} + +static __inline__ vector double __ATTRS_o_ai vec_xl_len_r(unsigned char *__a, + size_t __b) { + vector unsigned char __res = + (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56)); +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __mask = + (vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL); + __res = (vector unsigned char)__builtin_altivec_vperm_4si( + (vector int)__res, (vector int)__res, __mask); +#endif + return __res; +} + +// vec_xst_len +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned char __a, + unsigned char *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed char __a, + signed char *__b, size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed short __a, + signed short *__b, size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned short __a, + unsigned short *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed int __a, + signed int *__b, size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned int __a, + unsigned int *__b, size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void 
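vec_xl_len and vec_xst_len wrap the ISA 3.0 lxvl/stxvl length-controlled load and store, with the byte count shifted into the top bits of the length operand as the builtins expect; per the guards they require a 64-bit POWER9 target. A sketch of a bounded copy under those assumptions (copy_head is an invented name):

    #include <altivec.h>
    #include <stddef.h>

    /* Copy the first n bytes (n <= 16) of src to dst in one load/store pair. */
    static void copy_head(unsigned char *dst, unsigned char *src, size_t n) {
      vec_xst_len(vec_xl_len(src, n), dst, n);
    }
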
__ATTRS_o_ai vec_xst_len(vector float __a, float *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a, + signed __int128 *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a, + unsigned __int128 *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a, + signed long long *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned long long __a, + unsigned long long *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b, + size_t __c) { + return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56)); +} + +static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a, + unsigned char *__b, + size_t __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __mask = + (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL); + vector unsigned char __res = + __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask); + return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56)); +#else + return __builtin_vsx_stxvll((vector int)__a, __b, (__c << 56)); +#endif +} +#endif +#endif + +/* vec_cpsgn */ + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a, + vector float __b) { + return __builtin_vsx_xvcpsgnsp(__a, __b); +} + +static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a, + vector double __b) { + return __builtin_vsx_xvcpsgndp(__a, __b); +} +#endif + +/* vec_ctf */ + +static __inline__ vector float __ATTRS_o_ai vec_ctf(vector int __a, int __b) { + return __builtin_altivec_vcfsx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_ctf(vector unsigned int __a, + int __b) { + return __builtin_altivec_vcfux((vector int)__a, __b); +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai +vec_ctf(vector unsigned long long __a, int __b) { + vector double __ret = __builtin_convertvector(__a, vector double); + __ret *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52); + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_ctf(vector signed long long __a, int __b) { + vector double __ret = __builtin_convertvector(__a, vector double); + __ret *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52); + return __ret; +} +#endif + +/* vec_vcfsx */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vcfsx(vector int __a, int __b) { + return __builtin_altivec_vcfsx(__a, __b); +} + +/* vec_vcfux */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vcfux(vector unsigned int __a, int __b) { + return __builtin_altivec_vcfux((vector int)__a, __b); +} + +/* vec_cts */ + +static __inline__ vector int __ATTRS_o_ai vec_cts(vector float __a, int __b) { + return __builtin_altivec_vctsxs(__a, __b); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_cts(vector double __a, int __b) { + __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52); + return __builtin_convertvector(__a, vector signed long long); +} +#endif + +/* 
vec_vctsxs */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vctsxs(vector float __a, int __b) { + return __builtin_altivec_vctsxs(__a, __b); +} + +/* vec_ctu */ + +static __inline__ vector unsigned int __ATTRS_o_ai vec_ctu(vector float __a, + int __b) { + return __builtin_altivec_vctuxs(__a, __b); +} + +#ifdef __VSX__ +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_ctu(vector double __a, int __b) { + __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52); + return __builtin_convertvector(__a, vector unsigned long long); +} +#endif + +/* vec_vctuxs */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vctuxs(vector float __a, int __b) { + return __builtin_altivec_vctuxs(__a, __b); +} + +/* vec_signed */ + +static __inline__ vector signed int __ATTRS_o_ai +vec_sld(vector signed int, vector signed int, unsigned const int __c); + +static __inline__ vector signed int __ATTRS_o_ai +vec_signed(vector float __a) { + return __builtin_convertvector(__a, vector signed int); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_signed(vector double __a) { + return __builtin_convertvector(__a, vector signed long long); +} + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_signed2(vector double __a, vector double __b) { + return (vector signed int) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_signede(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvdpsxws(__a); +#endif +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_signedo(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvdpsxws(__a); +#else + vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a); + return vec_sld(__ret, __ret, 12); +#endif +} +#endif + +/* vec_unsigned */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sld(vector unsigned int, vector unsigned int, unsigned const int __c); + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unsigned(vector float __a) { + return __builtin_convertvector(__a, vector unsigned int); +} + +#ifdef __VSX__ +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_unsigned(vector double __a) { + return __builtin_convertvector(__a, vector unsigned long long); +} + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_unsigned2(vector double __a, vector double __b) { + return (vector unsigned int) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unsignede(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvdpuxws(__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unsignedo(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvdpuxws(__a); +#else + vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a); + return vec_sld(__ret, __ret, 12); +#endif +} +#endif + +/* vec_float */ + +static __inline__ vector float __ATTRS_o_ai +vec_sld(vector float, vector float, unsigned const int __c); + +static __inline__ vector float __ATTRS_o_ai +vec_float(vector signed int __a) { + return __builtin_convertvector(__a, vector float); +} + +static __inline__ vector float __ATTRS_o_ai +vec_float(vector unsigned 
int __a) { + return __builtin_convertvector(__a, vector float); +} + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai +vec_float2(vector signed long long __a, vector signed long long __b) { + return (vector float) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector float __ATTRS_o_ai +vec_float2(vector unsigned long long __a, vector unsigned long long __b) { + return (vector float) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector float __ATTRS_o_ai +vec_float2(vector double __a, vector double __b) { + return (vector float) { __a[0], __a[1], __b[0], __b[1] }; +} + +static __inline__ vector float __ATTRS_o_ai +vec_floate(vector signed long long __a) { +#ifdef __LITTLE_ENDIAN__ + vector float __ret = __builtin_vsx_xvcvsxdsp(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvsxdsp(__a); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floate(vector unsigned long long __a) { +#ifdef __LITTLE_ENDIAN__ + vector float __ret = __builtin_vsx_xvcvuxdsp(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvuxdsp(__a); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floate(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + vector float __ret = __builtin_vsx_xvcvdpsp(__a); + return vec_sld(__ret, __ret, 12); +#else + return __builtin_vsx_xvcvdpsp(__a); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floato(vector signed long long __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvsxdsp(__a); +#else + vector float __ret = __builtin_vsx_xvcvsxdsp(__a); + return vec_sld(__ret, __ret, 12); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floato(vector unsigned long long __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvuxdsp(__a); +#else + vector float __ret = __builtin_vsx_xvcvuxdsp(__a); + return vec_sld(__ret, __ret, 12); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_floato(vector double __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvdpsp(__a); +#else + vector float __ret = __builtin_vsx_xvcvdpsp(__a); + return vec_sld(__ret, __ret, 12); +#endif +} +#endif + +/* vec_double */ + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai +vec_double(vector signed long long __a) { + return __builtin_convertvector(__a, vector double); +} + +static __inline__ vector double __ATTRS_o_ai +vec_double(vector unsigned long long __a) { + return __builtin_convertvector(__a, vector double); +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublee(vector signed int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4)); +#else + return __builtin_vsx_xvcvsxwdp(__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublee(vector unsigned int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4)); +#else + return __builtin_vsx_xvcvuxwdp(__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublee(vector float __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4)); +#else + return __builtin_vsx_xvcvspdp(__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleh(vector signed int __a) { + vector double __ret = {__a[0], __a[1]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleh(vector unsigned int __a) { + vector double __ret = {__a[0], __a[1]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleh(vector float __a) { 
+ vector double __ret = {__a[0], __a[1]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublel(vector signed int __a) { + vector double __ret = {__a[2], __a[3]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublel(vector unsigned int __a) { + vector double __ret = {__a[2], __a[3]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doublel(vector float __a) { + vector double __ret = {__a[2], __a[3]}; + return __ret; +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleo(vector signed int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvsxwdp(__a); +#else + return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4)); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleo(vector unsigned int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvuxwdp(__a); +#else + return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4)); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_doubleo(vector float __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_vsx_xvcvspdp(__a); +#else + return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4)); +#endif +} +#endif + +/* vec_div */ + +/* Integer vector divides (vectors are scalarized, elements divided + and the vectors reassembled). +*/ +static __inline__ vector signed char __ATTRS_o_ai +vec_div(vector signed char __a, vector signed char __b) { + return __a / __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_div(vector unsigned char __a, vector unsigned char __b) { + return __a / __b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_div(vector signed short __a, vector signed short __b) { + return __a / __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_div(vector unsigned short __a, vector unsigned short __b) { + return __a / __b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_div(vector signed int __a, vector signed int __b) { + return __a / __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_div(vector unsigned int __a, vector unsigned int __b) { + return __a / __b; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_div(vector signed long long __a, vector signed long long __b) { + return __a / __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_div(vector unsigned long long __a, vector unsigned long long __b) { + return __a / __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_div(vector float __a, + vector float __b) { + return __a / __b; +} + +static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a, + vector double __b) { + return __a / __b; +} +#endif + +/* vec_dss */ + +static __inline__ void __attribute__((__always_inline__)) vec_dss(int __a) { + __builtin_altivec_dss(__a); +} + +/* vec_dssall */ + +static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) { + __builtin_altivec_dssall(); +} + +/* vec_dst */ +#define vec_dst(__PTR, __CW, __STR) \ + __extension__( \ + { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); }) + +/* vec_dstst */ +#define vec_dstst(__PTR, __CW, __STR) \ + __extension__( \ + { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); }) + +/* vec_dststt */ +#define vec_dststt(__PTR, __CW, __STR) \ + __extension__( \ + { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); }) + +/* vec_dstt */ +#define vec_dstt(__PTR, __CW, __STR) \ + __extension__( \ + { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); 
}) + +/* vec_eqv */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed char __ATTRS_o_ai +vec_eqv(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_eqv(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a, + vector bool char __b) { + return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_eqv(vector signed short __a, vector signed short __b) { + return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_eqv(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_eqv(vector bool short __a, vector bool short __b) { + return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_eqv(vector signed int __a, vector signed int __b) { + return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_eqv(vector unsigned int __a, vector unsigned int __b) { + return __builtin_vsx_xxleqv(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a, + vector bool int __b) { + return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_eqv(vector signed long long __a, vector signed long long __b) { + return (vector signed long long)__builtin_vsx_xxleqv( + (vector unsigned int)__a, (vector unsigned int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_eqv(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__builtin_vsx_xxleqv( + (vector unsigned int)__a, (vector unsigned int)__b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_eqv(vector bool long long __a, vector bool long long __b) { + return (vector bool long long)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_eqv(vector float __a, + vector float __b) { + return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ vector double __ATTRS_o_ai vec_eqv(vector double __a, + vector double __b) { + return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a, + (vector unsigned int)__b); +} +#endif + +/* vec_expte */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_expte(vector float __a) { + return __builtin_altivec_vexptefp(__a); +} + +/* vec_vexptefp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vexptefp(vector float __a) { + return __builtin_altivec_vexptefp(__a); +} + +/* vec_floor */ + +static __inline__ vector float __ATTRS_o_ai vec_floor(vector float __a) { +#ifdef __VSX__ + return 
__builtin_vsx_xvrspim(__a); +#else + return __builtin_altivec_vrfim(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) { + return __builtin_vsx_xvrdpim(__a); +} +#endif + +/* vec_vrfim */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrfim(vector float __a) { + return __builtin_altivec_vrfim(__a); +} + +/* vec_ld */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_ld(int __a, const vector signed char *__b) { + return (vector signed char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_ld(int __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_ld(int __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_ld(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_ld(int __a, const vector bool char *__b) { + return (vector bool char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_ld(int __a, + const vector short *__b) { + return (vector short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) { + return (vector short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_ld(int __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_ld(int __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_ld(int __a, const vector bool short *__b) { + return (vector bool short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_ld(int __a, + const vector pixel *__b) { + return (vector pixel)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_ld(int __a, + const vector int *__b) { + return (vector int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) { + return (vector int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_ld(int __a, const vector unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_ld(int __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_ld(int __a, const vector bool int *__b) { + return (vector bool int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_ld(int __a, + const vector float *__b) { + return (vector float)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) { + return (vector float)__builtin_altivec_lvx(__a, __b); +} + +/* vec_lvx */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvx(int __a, const vector signed char *__b) { + return (vector signed char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvx(int __a, const signed char *__b) { + return 
(vector signed char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvx(int __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvx(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvx(int __a, const vector bool char *__b) { + return (vector bool char)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a, + const vector short *__b) { + return (vector short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) { + return (vector short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvx(int __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvx(int __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvx(int __a, const vector bool short *__b) { + return (vector bool short)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvx(int __a, + const vector pixel *__b) { + return (vector pixel)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a, + const vector int *__b) { + return (vector int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) { + return (vector int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvx(int __a, const vector unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvx(int __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvx(int __a, const vector bool int *__b) { + return (vector bool int)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a, + const vector float *__b) { + return (vector float)__builtin_altivec_lvx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) { + return (vector float)__builtin_altivec_lvx(__a, __b); +} + +/* vec_lde */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lde(int __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvebx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lde(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvebx(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) { + return (vector short)__builtin_altivec_lvehx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lde(int __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvehx(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) { + return (vector int)__builtin_altivec_lvewx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lde(int __a, const unsigned int *__b) { + return (vector unsigned 
int)__builtin_altivec_lvewx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) { + return (vector float)__builtin_altivec_lvewx(__a, __b); +} + +/* vec_lvebx */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvebx(int __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvebx(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvebx(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvebx(__a, __b); +} + +/* vec_lvehx */ + +static __inline__ vector short __ATTRS_o_ai vec_lvehx(int __a, + const short *__b) { + return (vector short)__builtin_altivec_lvehx(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvehx(int __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvehx(__a, __b); +} + +/* vec_lvewx */ + +static __inline__ vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) { + return (vector int)__builtin_altivec_lvewx(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvewx(int __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvewx(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvewx(int __a, + const float *__b) { + return (vector float)__builtin_altivec_lvewx(__a, __b); +} + +/* vec_ldl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_ldl(int __a, const vector signed char *__b) { + return (vector signed char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_ldl(int __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_ldl(int __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_ldl(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_ldl(int __a, const vector bool char *__b) { + return (vector bool char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a, + const vector short *__b) { + return (vector short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) { + return (vector short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_ldl(int __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_ldl(int __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_ldl(int __a, const vector bool short *__b) { + return (vector bool short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_ldl(int __a, + const vector pixel *__b) { + return (vector pixel short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a, + const vector int *__b) { + return (vector int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) { + return (vector int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_ldl(int __a, 
const vector unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_ldl(int __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_ldl(int __a, const vector bool int *__b) { + return (vector bool int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a, + const vector float *__b) { + return (vector float)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) { + return (vector float)__builtin_altivec_lvxl(__a, __b); +} + +/* vec_lvxl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvxl(int __a, const vector signed char *__b) { + return (vector signed char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvxl(int __a, const signed char *__b) { + return (vector signed char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvxl(int __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvxl(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvxl(int __a, const vector bool char *__b) { + return (vector bool char)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a, + const vector short *__b) { + return (vector short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a, + const short *__b) { + return (vector short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvxl(int __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvxl(int __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvxl(int __a, const vector bool short *__b) { + return (vector bool short)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(int __a, + const vector pixel *__b) { + return (vector pixel)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a, + const vector int *__b) { + return (vector int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) { + return (vector int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvxl(int __a, const vector unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvxl(int __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvxl(int __a, const vector bool int *__b) { + return (vector bool int)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a, + const vector float *__b) { + return (vector float)__builtin_altivec_lvxl(__a, __b); +} + +static __inline__ vector float 
__ATTRS_o_ai vec_lvxl(int __a, + const float *__b) { + return (vector float)__builtin_altivec_lvxl(__a, __b); +} + +/* vec_loge */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_loge(vector float __a) { + return __builtin_altivec_vlogefp(__a); +} + +/* vec_vlogefp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vlogefp(vector float __a) { + return __builtin_altivec_vlogefp(__a); +} + +/* vec_lvsl */ + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const signed char *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsl(int __a, const signed char *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsl(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const short *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, + const short *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsl(int __a, const unsigned short *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const int *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, + const int *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsl(int __a, const unsigned int *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsl(int __a, const float *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsl(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a, + const float *__b) { + return (vector unsigned char)__builtin_altivec_lvsl(__a, __b); +} +#endif + +/* vec_lvsr */ + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const signed char *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsr(int __a, const signed char *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsr(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const short *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, + const short *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return 
vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsr(int __a, const unsigned short *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const int *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, + const int *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvsr(int __a, const unsigned int *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector unsigned char __ATTRS_o_ai + __attribute__((__deprecated__("use assignment for unaligned little endian \ +loads/stores"))) vec_lvsr(int __a, const float *__b) { + vector unsigned char mask = + (vector unsigned char)__builtin_altivec_lvsr(__a, __b); + vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0}; + return vec_perm(mask, mask, reverse); +} +#else +static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a, + const float *__b) { + return (vector unsigned char)__builtin_altivec_lvsr(__a, __b); +} +#endif + +/* vec_madd */ +static __inline__ vector signed short __ATTRS_o_ai +vec_mladd(vector signed short, vector signed short, vector signed short); +static __inline__ vector signed short __ATTRS_o_ai +vec_mladd(vector signed short, vector unsigned short, vector unsigned short); +static __inline__ vector signed short __ATTRS_o_ai +vec_mladd(vector unsigned short, vector signed short, vector signed short); +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short); + +static __inline__ vector signed short __ATTRS_o_ai vec_madd( + vector signed short __a, vector signed short __b, vector signed short __c) { + return vec_mladd(__a, __b, __c); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_madd(vector signed short __a, vector unsigned short __b, + vector unsigned short __c) { + return vec_mladd(__a, __b, __c); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_madd(vector unsigned short __a, vector signed short __b, + vector signed short __c) { + return vec_mladd(__a, __b, __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_madd(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return vec_mladd(__a, __b, __c); +} + +static __inline__ vector float __ATTRS_o_ai vec_madd(vector float __a, + vector float __b, + vector float __c) { +#ifdef __VSX__ + return __builtin_vsx_xvmaddasp(__a, __b, __c); +#else + return 
__builtin_altivec_vmaddfp(__a, __b, __c); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_madd(vector double __a, + vector double __b, + vector double __c) { + return __builtin_vsx_xvmaddadp(__a, __b, __c); +} +#endif + +/* vec_vmaddfp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vmaddfp(vector float __a, vector float __b, vector float __c) { + return __builtin_altivec_vmaddfp(__a, __b, __c); +} + +/* vec_madds */ + +static __inline__ vector signed short __attribute__((__always_inline__)) +vec_madds(vector signed short __a, vector signed short __b, + vector signed short __c) { + return __builtin_altivec_vmhaddshs(__a, __b, __c); +} + +/* vec_vmhaddshs */ +static __inline__ vector signed short __attribute__((__always_inline__)) +vec_vmhaddshs(vector signed short __a, vector signed short __b, + vector signed short __c) { + return __builtin_altivec_vmhaddshs(__a, __b, __c); +} + +/* vec_msub */ + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai vec_msub(vector float __a, + vector float __b, + vector float __c) { + return __builtin_vsx_xvmsubasp(__a, __b, __c); +} + +static __inline__ vector double __ATTRS_o_ai vec_msub(vector double __a, + vector double __b, + vector double __c) { + return __builtin_vsx_xvmsubadp(__a, __b, __c); +} +#endif + +/* vec_max */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_max(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vmaxsb(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_max(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vmaxsb((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_max(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vmaxsb(__a, (vector signed char)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_max(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vmaxub(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_max(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vmaxub((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_max(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a, + vector short __b) { + return __builtin_altivec_vmaxsh(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_max(vector bool short __a, + vector short __b) { + return __builtin_altivec_vmaxsh((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a, + vector bool short __b) { + return __builtin_altivec_vmaxsh(__a, (vector short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_max(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vmaxuh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_max(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_max(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a, + vector int __b) { + return __builtin_altivec_vmaxsw(__a, __b); +} + 
+static __inline__ vector int __ATTRS_o_ai vec_max(vector bool int __a, + vector int __b) { + return __builtin_altivec_vmaxsw((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a, + vector bool int __b) { + return __builtin_altivec_vmaxsw(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_max(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vmaxuw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_max(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_max(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_max(vector signed long long __a, vector signed long long __b) { + return __builtin_altivec_vmaxsd(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_max(vector bool long long __a, vector signed long long __b) { + return __builtin_altivec_vmaxsd((vector signed long long)__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_max(vector signed long long __a, vector bool long long __b) { + return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_max(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vmaxud(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_max(vector bool long long __a, vector unsigned long long __b) { + return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_max(vector unsigned long long __a, vector bool long long __b) { + return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b); +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_max(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvmaxsp(__a, __b); +#else + return __builtin_altivec_vmaxfp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_max(vector double __a, + vector double __b) { + return __builtin_vsx_xvmaxdp(__a, __b); +} +#endif + +/* vec_vmaxsb */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmaxsb(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vmaxsb(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmaxsb(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vmaxsb((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmaxsb(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vmaxsb(__a, (vector signed char)__b); +} + +/* vec_vmaxub */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmaxub(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vmaxub(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmaxub(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vmaxub((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmaxub(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b); +} + +/* vec_vmaxsh */ + +static __inline__ 
vector short __ATTRS_o_ai vec_vmaxsh(vector short __a, + vector short __b) { + return __builtin_altivec_vmaxsh(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a, + vector short __b) { + return __builtin_altivec_vmaxsh((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a, + vector bool short __b) { + return __builtin_altivec_vmaxsh(__a, (vector short)__b); +} + +/* vec_vmaxuh */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vmaxuh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmaxuh(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmaxuh(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b); +} + +/* vec_vmaxsw */ + +static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, + vector int __b) { + return __builtin_altivec_vmaxsw(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a, + vector int __b) { + return __builtin_altivec_vmaxsw((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a, + vector bool int __b) { + return __builtin_altivec_vmaxsw(__a, (vector int)__b); +} + +/* vec_vmaxuw */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmaxuw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vmaxuw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmaxuw(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmaxuw(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b); +} + +/* vec_vmaxfp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vmaxfp(vector float __a, vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvmaxsp(__a, __b); +#else + return __builtin_altivec_vmaxfp(__a, __b); +#endif +} + +/* vec_mergeh */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_mergeh(vector signed char __a, vector signed char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_mergeh(vector unsigned char __a, vector unsigned char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_mergeh(vector bool char __a, vector bool char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector short __ATTRS_o_ai vec_mergeh(vector short __a, + vector short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mergeh(vector unsigned short __a, vector unsigned short __b) { + return 
vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_mergeh(vector bool short __a, vector bool short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a, + vector pixel __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector int __ATTRS_o_ai vec_mergeh(vector int __a, + vector int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mergeh(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector float __ATTRS_o_ai vec_mergeh(vector float __a, + vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergeh(vector signed long long __a, vector signed long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergeh(vector signed long long __a, vector bool long long __b) { + return vec_perm(__a, (vector signed long long)__b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergeh(vector bool long long __a, vector signed long long __b) { + return vec_perm((vector signed long long)__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergeh(vector unsigned long long __a, vector bool long long __b) { + return vec_perm(__a, (vector unsigned long long)__b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergeh(vector bool long long __a, vector unsigned long long __b) { + return vec_perm((vector unsigned long long)__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 
0x15, 0x16, 0x17)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_mergeh(vector bool long long __a, vector bool long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector double __ATTRS_o_ai vec_mergeh(vector double __a, + vector double __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} +static __inline__ vector double __ATTRS_o_ai +vec_mergeh(vector double __a, vector bool long long __b) { + return vec_perm(__a, (vector double)__b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} +static __inline__ vector double __ATTRS_o_ai +vec_mergeh(vector bool long long __a, vector double __b) { + return vec_perm((vector double)__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17)); +} +#endif + +/* vec_vmrghb */ + +#define __builtin_altivec_vmrghb vec_vmrghb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmrghb(vector signed char __a, vector signed char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmrghb(vector unsigned char __a, vector unsigned char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vmrghb(vector bool char __a, vector bool char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12, + 0x03, 0x13, 0x04, 0x14, 0x05, 0x15, + 0x06, 0x16, 0x07, 0x17)); +} + +/* vec_vmrghh */ + +#define __builtin_altivec_vmrghh vec_vmrghh + +static __inline__ vector short __ATTRS_o_ai vec_vmrghh(vector short __a, + vector short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmrghh(vector unsigned short __a, vector unsigned short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vmrghh(vector bool short __a, vector bool short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a, + vector pixel __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03, + 0x12, 0x13, 0x04, 0x05, 0x14, 0x15, + 0x06, 0x07, 0x16, 0x17)); +} + +/* vec_vmrghw */ + +#define __builtin_altivec_vmrghw vec_vmrghw + +static __inline__ vector int __ATTRS_o_ai vec_vmrghw(vector int __a, + vector int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmrghw(vector unsigned int __a, 
vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +static __inline__ vector float __ATTRS_o_ai vec_vmrghw(vector float __a, + vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, + 0x14, 0x15, 0x16, 0x17)); +} + +/* vec_mergel */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_mergel(vector signed char __a, vector signed char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_mergel(vector unsigned char __a, vector unsigned char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_mergel(vector bool char __a, vector bool char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector short __ATTRS_o_ai vec_mergel(vector short __a, + vector short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mergel(vector unsigned short __a, vector unsigned short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_mergel(vector bool short __a, vector bool short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a, + vector pixel __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector int __ATTRS_o_ai vec_mergel(vector int __a, + vector int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mergel(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector float __ATTRS_o_ai vec_mergel(vector float __a, + vector float __b) { + return vec_perm(__a, __b, + (vector unsigned 
char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergel(vector signed long long __a, vector signed long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergel(vector signed long long __a, vector bool long long __b) { + return vec_perm(__a, (vector signed long long)__b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergel(vector bool long long __a, vector signed long long __b) { + return vec_perm((vector signed long long)__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergel(vector unsigned long long __a, vector unsigned long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergel(vector unsigned long long __a, vector bool long long __b) { + return vec_perm(__a, (vector unsigned long long)__b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergel(vector bool long long __a, vector unsigned long long __b) { + return vec_perm((vector unsigned long long)__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector bool long long __ATTRS_o_ai +vec_mergel(vector bool long long __a, vector bool long long __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector double __ATTRS_o_ai vec_mergel(vector double __a, + vector double __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector double __ATTRS_o_ai +vec_mergel(vector double __a, vector bool long long __b) { + return vec_perm(__a, (vector double)__b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +static __inline__ vector double __ATTRS_o_ai +vec_mergel(vector bool long long __a, vector double __b) { + return vec_perm((vector double)__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, + 0x0E, 0x0F, 0x18, 0X19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F)); +} +#endif + +/* vec_vmrglb */ + +#define __builtin_altivec_vmrglb vec_vmrglb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vmrglb(vector signed char __a, vector signed char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vmrglb(vector unsigned char __a, vector unsigned char __b) { + return vec_perm(__a, __b, + 
(vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vmrglb(vector bool char __a, vector bool char __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, + 0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D, + 0x0E, 0x1E, 0x0F, 0x1F)); +} + +/* vec_vmrglh */ + +#define __builtin_altivec_vmrglh vec_vmrglh + +static __inline__ vector short __ATTRS_o_ai vec_vmrglh(vector short __a, + vector short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmrglh(vector unsigned short __a, vector unsigned short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vmrglh(vector bool short __a, vector bool short __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a, + vector pixel __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, + 0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D, + 0x0E, 0x0F, 0x1E, 0x1F)); +} + +/* vec_vmrglw */ + +#define __builtin_altivec_vmrglw vec_vmrglw + +static __inline__ vector int __ATTRS_o_ai vec_vmrglw(vector int __a, + vector int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vmrglw(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector float __ATTRS_o_ai vec_vmrglw(vector float __a, + vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, + 0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +#ifdef __POWER8_VECTOR__ +/* vec_mergee */ + +static __inline__ vector bool int __ATTRS_o_ai vec_mergee(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, + 0x18, 0x19, 0x1A, 0x1B)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_mergee(vector signed int __a, vector signed int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, + 0x18, 0x19, 0x1A, 0x1B)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mergee(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, + 0x18, 0x19, 0x1A, 0x1B)); +} + +static __inline__ vector bool long long __ATTRS_o_ai 
+vec_mergee(vector bool long long __a, vector bool long long __b) { + return vec_mergeh(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergee(vector signed long long __a, vector signed long long __b) { + return vec_mergeh(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergee(vector unsigned long long __a, vector unsigned long long __b) { + return vec_mergeh(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_mergee(vector float __a, vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11, + 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, + 0x18, 0x19, 0x1A, 0x1B)); +} + +static __inline__ vector double __ATTRS_o_ai +vec_mergee(vector double __a, vector double __b) { + return vec_mergeh(__a, __b); +} + +/* vec_mergeo */ + +static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a, + vector bool int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15, + 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_mergeo(vector signed int __a, vector signed int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15, + 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mergeo(vector unsigned int __a, vector unsigned int __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15, + 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_mergeo(vector bool long long __a, vector bool long long __b) { + return vec_mergel(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_mergeo(vector signed long long __a, vector signed long long __b) { + return vec_mergel(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mergeo(vector unsigned long long __a, vector unsigned long long __b) { + return vec_mergel(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_mergeo(vector float __a, vector float __b) { + return vec_perm(__a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15, + 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, + 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static __inline__ vector double __ATTRS_o_ai +vec_mergeo(vector double __a, vector double __b) { + return vec_mergel(__a, __b); +} + +#endif + +/* vec_mfvscr */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_mfvscr(void) { + return __builtin_altivec_mfvscr(); +} + +/* vec_min */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_min(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vminsb(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_min(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vminsb((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_min(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vminsb(__a, (vector signed char)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_min(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vminub(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_min(vector bool char __a, vector unsigned char __b) { + return 
__builtin_altivec_vminub((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_min(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vminub(__a, (vector unsigned char)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a, + vector short __b) { + return __builtin_altivec_vminsh(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_min(vector bool short __a, + vector short __b) { + return __builtin_altivec_vminsh((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a, + vector bool short __b) { + return __builtin_altivec_vminsh(__a, (vector short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_min(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vminuh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_min(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vminuh((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_min(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vminuh(__a, (vector unsigned short)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a, + vector int __b) { + return __builtin_altivec_vminsw(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_min(vector bool int __a, + vector int __b) { + return __builtin_altivec_vminsw((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a, + vector bool int __b) { + return __builtin_altivec_vminsw(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_min(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vminuw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_min(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vminuw((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_min(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vminuw(__a, (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_min(vector signed long long __a, vector signed long long __b) { + return __builtin_altivec_vminsd(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_min(vector bool long long __a, vector signed long long __b) { + return __builtin_altivec_vminsd((vector signed long long)__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_min(vector signed long long __a, vector bool long long __b) { + return __builtin_altivec_vminsd(__a, (vector signed long long)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_min(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vminud(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_min(vector bool long long __a, vector unsigned long long __b) { + return __builtin_altivec_vminud((vector unsigned long long)__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_min(vector unsigned long long __a, vector bool long long __b) { + return __builtin_altivec_vminud(__a, (vector unsigned long long)__b); +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_min(vector float __a, + vector float __b) { +#ifdef __VSX__ + return 
__builtin_vsx_xvminsp(__a, __b); +#else + return __builtin_altivec_vminfp(__a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_min(vector double __a, + vector double __b) { + return __builtin_vsx_xvmindp(__a, __b); +} +#endif + +/* vec_vminsb */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vminsb(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vminsb(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vminsb(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vminsb((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vminsb(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vminsb(__a, (vector signed char)__b); +} + +/* vec_vminub */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vminub(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vminub(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vminub(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vminub((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vminub(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vminub(__a, (vector unsigned char)__b); +} + +/* vec_vminsh */ + +static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a, + vector short __b) { + return __builtin_altivec_vminsh(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector bool short __a, + vector short __b) { + return __builtin_altivec_vminsh((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a, + vector bool short __b) { + return __builtin_altivec_vminsh(__a, (vector short)__b); +} + +/* vec_vminuh */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vminuh(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vminuh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vminuh(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vminuh((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vminuh(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vminuh(__a, (vector unsigned short)__b); +} + +/* vec_vminsw */ + +static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a, + vector int __b) { + return __builtin_altivec_vminsw(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector bool int __a, + vector int __b) { + return __builtin_altivec_vminsw((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a, + vector bool int __b) { + return __builtin_altivec_vminsw(__a, (vector int)__b); +} + +/* vec_vminuw */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vminuw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vminuw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vminuw(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vminuw((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vminuw(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vminuw(__a, (vector unsigned int)__b); +} + +/* vec_vminfp */ + +static __inline__ vector float 
__attribute__((__always_inline__)) +vec_vminfp(vector float __a, vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvminsp(__a, __b); +#else + return __builtin_altivec_vminfp(__a, __b); +#endif +} + +/* vec_mladd */ + +#define __builtin_altivec_vmladduhm vec_mladd + +static __inline__ vector short __ATTRS_o_ai vec_mladd(vector short __a, + vector short __b, + vector short __c) { + return __a * __b + __c; +} + +static __inline__ vector short __ATTRS_o_ai vec_mladd( + vector short __a, vector unsigned short __b, vector unsigned short __c) { + return __a * (vector short)__b + (vector short)__c; +} + +static __inline__ vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a, + vector short __b, + vector short __c) { + return (vector short)__a * __b + __c; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mladd(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __a * __b + __c; +} + +/* vec_vmladduhm */ + +static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(vector short __a, + vector short __b, + vector short __c) { + return __a * __b + __c; +} + +static __inline__ vector short __ATTRS_o_ai vec_vmladduhm( + vector short __a, vector unsigned short __b, vector unsigned short __c) { + return __a * (vector short)__b + (vector short)__c; +} + +static __inline__ vector short __ATTRS_o_ai +vec_vmladduhm(vector unsigned short __a, vector short __b, vector short __c) { + return (vector short)__a * __b + __c; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vmladduhm(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __a * __b + __c; +} + +/* vec_mradds */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_mradds(vector short __a, vector short __b, vector short __c) { + return __builtin_altivec_vmhraddshs(__a, __b, __c); +} + +/* vec_vmhraddshs */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_vmhraddshs(vector short __a, vector short __b, vector short __c) { + return __builtin_altivec_vmhraddshs(__a, __b, __c); +} + +/* vec_msum */ + +static __inline__ vector int __ATTRS_o_ai vec_msum(vector signed char __a, + vector unsigned char __b, + vector int __c) { + return __builtin_altivec_vmsummbm(__a, __b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_msum(vector unsigned char __a, vector unsigned char __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumubm(__a, __b, __c); +} + +static __inline__ vector int __ATTRS_o_ai vec_msum(vector short __a, + vector short __b, + vector int __c) { + return __builtin_altivec_vmsumshm(__a, __b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_msum(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumuhm(__a, __b, __c); +} + +/* vec_vmsummbm */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) { + return __builtin_altivec_vmsummbm(__a, __b, __c); +} + +/* vec_vmsumubm */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmsumubm(vector unsigned char __a, vector unsigned char __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumubm(__a, __b, __c); +} + +/* vec_vmsumshm */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmsumshm(vector short __a, vector short __b, vector int __c) { + return 
__builtin_altivec_vmsumshm(__a, __b, __c); +} + +/* vec_vmsumuhm */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumuhm(__a, __b, __c); +} + +/* vec_msums */ + +static __inline__ vector int __ATTRS_o_ai vec_msums(vector short __a, + vector short __b, + vector int __c) { + return __builtin_altivec_vmsumshs(__a, __b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_msums(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumuhs(__a, __b, __c); +} + +/* vec_vmsumshs */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmsumshs(vector short __a, vector short __b, vector int __c) { + return __builtin_altivec_vmsumshs(__a, __b, __c); +} + +/* vec_vmsumuhs */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_altivec_vmsumuhs(__a, __b, __c); +} + +/* vec_mtvscr */ + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector signed char __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool char __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector short __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool short __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector pixel __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector int __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool int __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +static __inline__ void __ATTRS_o_ai vec_mtvscr(vector float __a) { + __builtin_altivec_mtvscr((vector int)__a); +} + +/* vec_mul */ + +/* Integer vector multiplication will involve multiplication of the odd/even + elements separately, then truncating the results and moving to the + result vector. 
+*/ +static __inline__ vector signed char __ATTRS_o_ai +vec_mul(vector signed char __a, vector signed char __b) { + return __a * __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_mul(vector unsigned char __a, vector unsigned char __b) { + return __a * __b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_mul(vector signed short __a, vector signed short __b) { + return __a * __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mul(vector unsigned short __a, vector unsigned short __b) { + return __a * __b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_mul(vector signed int __a, vector signed int __b) { + return __a * __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mul(vector unsigned int __a, vector unsigned int __b) { + return __a * __b; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mul(vector signed long long __a, vector signed long long __b) { + return __a * __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mul(vector unsigned long long __a, vector unsigned long long __b) { + return __a * __b; +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_mul(vector float __a, + vector float __b) { + return __a * __b; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_mul(vector double __a, + vector double __b) { + return __a * __b; +} +#endif + +/* The vmulos* and vmules* instructions have a big endian bias, so + we must reverse the meaning of "even" and "odd" for little endian. */ + +/* vec_mule */ + +static __inline__ vector short __ATTRS_o_ai vec_mule(vector signed char __a, + vector signed char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosb(__a, __b); +#else + return __builtin_altivec_vmulesb(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mule(vector unsigned char __a, vector unsigned char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuloub(__a, __b); +#else + return __builtin_altivec_vmuleub(__a, __b); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_mule(vector short __a, + vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosh(__a, __b); +#else + return __builtin_altivec_vmulesh(__a, __b); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mule(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulouh(__a, __b); +#else + return __builtin_altivec_vmuleuh(__a, __b); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mule(vector signed int __a, vector signed int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosw(__a, __b); +#else + return __builtin_altivec_vmulesw(__a, __b); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mule(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulouw(__a, __b); +#else + return __builtin_altivec_vmuleuw(__a, __b); +#endif +} +#endif + +/* vec_vmulesb */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_vmulesb(vector signed char __a, vector signed char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosb(__a, __b); +#else + return __builtin_altivec_vmulesb(__a, __b); +#endif +} + +/* vec_vmuleub */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_vmuleub(vector unsigned char __a, vector 
unsigned char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuloub(__a, __b); +#else + return __builtin_altivec_vmuleub(__a, __b); +#endif +} + +/* vec_vmulesh */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmulesh(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosh(__a, __b); +#else + return __builtin_altivec_vmulesh(__a, __b); +#endif +} + +/* vec_vmuleuh */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulouh(__a, __b); +#else + return __builtin_altivec_vmuleuh(__a, __b); +#endif +} + +/* vec_mulo */ + +static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a, + vector signed char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesb(__a, __b); +#else + return __builtin_altivec_vmulosb(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_mulo(vector unsigned char __a, vector unsigned char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleub(__a, __b); +#else + return __builtin_altivec_vmuloub(__a, __b); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_mulo(vector short __a, + vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesh(__a, __b); +#else + return __builtin_altivec_vmulosh(__a, __b); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_mulo(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleuh(__a, __b); +#else + return __builtin_altivec_vmulouh(__a, __b); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_mulo(vector signed int __a, vector signed int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesw(__a, __b); +#else + return __builtin_altivec_vmulosw(__a, __b); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_mulo(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleuw(__a, __b); +#else + return __builtin_altivec_vmulouw(__a, __b); +#endif +} +#endif + +/* vec_vmulosb */ + +static __inline__ vector short __attribute__((__always_inline__)) +vec_vmulosb(vector signed char __a, vector signed char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesb(__a, __b); +#else + return __builtin_altivec_vmulosb(__a, __b); +#endif +} + +/* vec_vmuloub */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_vmuloub(vector unsigned char __a, vector unsigned char __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleub(__a, __b); +#else + return __builtin_altivec_vmuloub(__a, __b); +#endif +} + +/* vec_vmulosh */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vmulosh(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesh(__a, __b); +#else + return __builtin_altivec_vmulosh(__a, __b); +#endif +} + +/* vec_vmulouh */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vmulouh(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleuh(__a, __b); +#else + return __builtin_altivec_vmulouh(__a, __b); +#endif +} + +/* vec_nand */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed char __ATTRS_o_ai 
+vec_nand(vector signed char __a, vector signed char __b) { + return ~(__a & __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_nand(vector signed char __a, vector bool char __b) { + return ~(__a & __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_nand(vector bool char __a, vector signed char __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_nand(vector unsigned char __a, vector unsigned char __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_nand(vector unsigned char __a, vector bool char __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_nand(vector bool char __a, vector unsigned char __b) { + return ~(__a & __b); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a, + vector bool char __b) { + return ~(__a & __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_nand(vector signed short __a, vector signed short __b) { + return ~(__a & __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_nand(vector signed short __a, vector bool short __b) { + return ~(__a & __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_nand(vector bool short __a, vector signed short __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_nand(vector unsigned short __a, vector unsigned short __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_nand(vector unsigned short __a, vector bool short __b) { + return ~(__a & __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_nand(vector bool short __a, vector bool short __b) { + return ~(__a & __b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_nand(vector signed int __a, vector signed int __b) { + return ~(__a & __b); +} + +static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a, + vector bool int __b) { + return ~(__a & __b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_nand(vector bool int __a, vector signed int __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_nand(vector unsigned int __a, vector unsigned int __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_nand(vector unsigned int __a, vector bool int __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_nand(vector bool int __a, vector unsigned int __b) { + return ~(__a & __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a, + vector bool int __b) { + return ~(__a & __b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_nand(vector float __a, vector float __b) { + return (vector float)(~((vector unsigned int)__a & + (vector unsigned int)__b)); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_nand(vector signed long long __a, vector signed long long __b) { + return ~(__a & __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_nand(vector signed long long __a, vector bool long long __b) { + return ~(__a & __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_nand(vector bool long long __a, vector signed long long __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_nand(vector unsigned long long __a, vector unsigned long long __b) { + return ~(__a & __b); +} + +static __inline__ vector 
unsigned long long __ATTRS_o_ai +vec_nand(vector unsigned long long __a, vector bool long long __b) { + return ~(__a & __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_nand(vector bool long long __a, vector unsigned long long __b) { + return ~(__a & __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_nand(vector bool long long __a, vector bool long long __b) { + return ~(__a & __b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_nand(vector double __a, vector double __b) { + return (vector double)(~((vector unsigned long long)__a & + (vector unsigned long long)__b)); +} + +#endif + +/* vec_nmadd */ + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai vec_nmadd(vector float __a, + vector float __b, + vector float __c) { + return __builtin_vsx_xvnmaddasp(__a, __b, __c); +} + +static __inline__ vector double __ATTRS_o_ai vec_nmadd(vector double __a, + vector double __b, + vector double __c) { + return __builtin_vsx_xvnmaddadp(__a, __b, __c); +} +#endif + +/* vec_nmsub */ + +static __inline__ vector float __ATTRS_o_ai vec_nmsub(vector float __a, + vector float __b, + vector float __c) { +#ifdef __VSX__ + return __builtin_vsx_xvnmsubasp(__a, __b, __c); +#else + return __builtin_altivec_vnmsubfp(__a, __b, __c); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_nmsub(vector double __a, + vector double __b, + vector double __c) { + return __builtin_vsx_xvnmsubadp(__a, __b, __c); +} +#endif + +/* vec_vnmsubfp */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vnmsubfp(vector float __a, vector float __b, vector float __c) { + return __builtin_altivec_vnmsubfp(__a, __b, __c); +} + +/* vec_nor */ + +#define __builtin_altivec_vnor vec_nor + +static __inline__ vector signed char __ATTRS_o_ai +vec_nor(vector signed char __a, vector signed char __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_nor(vector unsigned char __a, vector unsigned char __b) { + return ~(__a | __b); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_nor(vector bool char __a, + vector bool char __b) { + return ~(__a | __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_nor(vector short __a, + vector short __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_nor(vector unsigned short __a, vector unsigned short __b) { + return ~(__a | __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_nor(vector bool short __a, vector bool short __b) { + return ~(__a | __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_nor(vector int __a, + vector int __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_nor(vector unsigned int __a, vector unsigned int __b) { + return ~(__a | __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_nor(vector bool int __a, + vector bool int __b) { + return ~(__a | __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_nor(vector float __a, + vector float __b) { + vector unsigned int __res = + ~((vector unsigned int)__a | (vector unsigned int)__b); + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_nor(vector double __a, + vector double __b) { + vector unsigned long long __res = + ~((vector unsigned long long)__a | (vector unsigned long long)__b); + return (vector double)__res; +} +#endif + +/* vec_vnor */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vnor(vector signed char 
__a, vector signed char __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vnor(vector unsigned char __a, vector unsigned char __b) { + return ~(__a | __b); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a, + vector bool char __b) { + return ~(__a | __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vnor(vector short __a, + vector short __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vnor(vector unsigned short __a, vector unsigned short __b) { + return ~(__a | __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vnor(vector bool short __a, vector bool short __b) { + return ~(__a | __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vnor(vector int __a, + vector int __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vnor(vector unsigned int __a, vector unsigned int __b) { + return ~(__a | __b); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a, + vector bool int __b) { + return ~(__a | __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vnor(vector float __a, + vector float __b) { + vector unsigned int __res = + ~((vector unsigned int)__a | (vector unsigned int)__b); + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_nor(vector signed long long __a, vector signed long long __b) { + return ~(__a | __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_nor(vector unsigned long long __a, vector unsigned long long __b) { + return ~(__a | __b); +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_nor(vector bool long long __a, vector bool long long __b) { + return ~(__a | __b); +} +#endif + +/* vec_or */ + +#define __builtin_altivec_vor vec_or + +static __inline__ vector signed char __ATTRS_o_ai +vec_or(vector signed char __a, vector signed char __b) { + return __a | __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_or(vector bool char __a, vector signed char __b) { + return (vector signed char)__a | __b; +} + +static __inline__ vector signed char __ATTRS_o_ai vec_or(vector signed char __a, + vector bool char __b) { + return __a | (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_or(vector unsigned char __a, vector unsigned char __b) { + return __a | __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_or(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a | __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_or(vector unsigned char __a, vector bool char __b) { + return __a | (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_or(vector bool char __a, + vector bool char __b) { + return __a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a, + vector short __b) { + return __a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_or(vector bool short __a, + vector short __b) { + return (vector short)__a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a, + vector bool short __b) { + return __a | (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_or(vector unsigned short __a, vector unsigned short __b) { + return __a | __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_or(vector bool short __a, vector unsigned short 
__b) { + return (vector unsigned short)__a | __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_or(vector unsigned short __a, vector bool short __b) { + return __a | (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai vec_or(vector bool short __a, + vector bool short __b) { + return __a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a, + vector int __b) { + return __a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_or(vector bool int __a, + vector int __b) { + return (vector int)__a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a, + vector bool int __b) { + return __a | (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_or(vector unsigned int __a, vector unsigned int __b) { + return __a | __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_or(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a | __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_or(vector unsigned int __a, vector bool int __b) { + return __a | (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_or(vector bool int __a, + vector bool int __b) { + return __a | __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_or(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a, + vector double __b) { + return (vector unsigned long long)__a | (vector unsigned long long)__b; +} + +static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a, + vector bool long long __b) { + return (vector unsigned long long)__a | (vector unsigned long long)__b; +} + +static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a, + vector double __b) { + vector unsigned long long __res = + (vector unsigned long long)__a | (vector unsigned long long)__b; + return (vector double)__res; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_or(vector signed long long __a, vector signed long long __b) { + return __a | __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_or(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a | __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_or(vector signed long long __a, vector bool long long __b) { + return __a | (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_or(vector unsigned long long __a, vector unsigned long long __b) { + return __a | __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_or(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a | __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_or(vector unsigned long long __a, vector bool long long __b) { + return __a | (vector unsigned long long)__b; +} + +static 
__inline__ vector bool long long __ATTRS_o_ai +vec_or(vector bool long long __a, vector bool long long __b) { + return __a | __b; +} +#endif + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed char __ATTRS_o_ai +vec_orc(vector signed char __a, vector signed char __b) { + return __a | ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_orc(vector signed char __a, vector bool char __b) { + return __a | ~__b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_orc(vector bool char __a, vector signed char __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_orc(vector unsigned char __a, vector unsigned char __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_orc(vector unsigned char __a, vector bool char __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_orc(vector bool char __a, vector unsigned char __b) { + return __a | ~__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a, + vector bool char __b) { + return __a | ~__b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_orc(vector signed short __a, vector signed short __b) { + return __a | ~__b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_orc(vector signed short __a, vector bool short __b) { + return __a | ~__b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_orc(vector bool short __a, vector signed short __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_orc(vector unsigned short __a, vector unsigned short __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_orc(vector unsigned short __a, vector bool short __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_orc(vector bool short __a, vector unsigned short __b) { + return __a | ~__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_orc(vector bool short __a, vector bool short __b) { + return __a | ~__b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_orc(vector signed int __a, vector signed int __b) { + return __a | ~__b; +} + +static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a, + vector bool int __b) { + return __a | ~__b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_orc(vector bool int __a, vector signed int __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_orc(vector unsigned int __a, vector unsigned int __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_orc(vector unsigned int __a, vector bool int __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_orc(vector bool int __a, vector unsigned int __b) { + return __a | ~__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a, + vector bool int __b) { + return __a | ~__b; +} + +static __inline__ vector float __ATTRS_o_ai +vec_orc(vector bool int __a, vector float __b) { + return (vector float)(__a | ~(vector unsigned int)__b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_orc(vector float __a, vector bool int __b) { + return (vector float)((vector unsigned int)__a | ~__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_orc(vector signed long long __a, vector signed long long __b) { + return __a | ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai 
+vec_orc(vector signed long long __a, vector bool long long __b) { + return __a | ~__b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_orc(vector bool long long __a, vector signed long long __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_orc(vector unsigned long long __a, vector unsigned long long __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_orc(vector unsigned long long __a, vector bool long long __b) { + return __a | ~__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_orc(vector bool long long __a, vector unsigned long long __b) { + return __a | ~__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_orc(vector bool long long __a, vector bool long long __b) { + return __a | ~__b; +} + +static __inline__ vector double __ATTRS_o_ai +vec_orc(vector double __a, vector bool long long __b) { + return (vector double)((vector unsigned long long)__a | ~__b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_orc(vector bool long long __a, vector double __b) { + return (vector double)(__a | ~(vector unsigned long long)__b); +} +#endif + +/* vec_vor */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vor(vector signed char __a, vector signed char __b) { + return __a | __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vor(vector bool char __a, vector signed char __b) { + return (vector signed char)__a | __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vor(vector signed char __a, vector bool char __b) { + return __a | (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vor(vector unsigned char __a, vector unsigned char __b) { + return __a | __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vor(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a | __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vor(vector unsigned char __a, vector bool char __b) { + return __a | (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vor(vector bool char __a, + vector bool char __b) { + return __a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a, + vector short __b) { + return __a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vor(vector bool short __a, + vector short __b) { + return (vector short)__a | __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a, + vector bool short __b) { + return __a | (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vor(vector unsigned short __a, vector unsigned short __b) { + return __a | __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vor(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a | __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vor(vector unsigned short __a, vector bool short __b) { + return __a | (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vor(vector bool short __a, vector bool short __b) { + return __a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a, + vector int __b) { + return __a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vor(vector bool int __a, + vector int __b) { + return (vector int)__a | __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a, + 
vector bool int __b) { + return __a | (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vor(vector unsigned int __a, vector unsigned int __b) { + return __a | __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vor(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a | __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vor(vector unsigned int __a, vector bool int __b) { + return __a | (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vor(vector bool int __a, + vector bool int __b) { + return __a | __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vor(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a | (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_vor(vector signed long long __a, vector signed long long __b) { + return __a | __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vor(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a | __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vor(vector signed long long __a, vector bool long long __b) { + return __a | (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vor(vector unsigned long long __a, vector unsigned long long __b) { + return __a | __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vor(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a | __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vor(vector unsigned long long __a, vector bool long long __b) { + return __a | (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vor(vector bool long long __a, vector bool long long __b) { + return __a | __b; +} +#endif + +/* vec_pack */ + +/* The various vector pack instructions have a big-endian bias, so for + little endian we must handle reversed element numbering. 
*/ + +static __inline__ vector signed char __ATTRS_o_ai +vec_pack(vector signed short __a, vector signed short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector signed char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector signed char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_pack(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector unsigned char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_pack(vector bool short __a, vector bool short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector bool char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector short __ATTRS_o_ai vec_pack(vector int __a, + vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_pack(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector unsigned short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai vec_pack(vector bool int __a, + vector bool int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector bool short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +#ifdef __VSX__ +static __inline__ vector signed int __ATTRS_o_ai +vec_pack(vector signed long long __a, vector signed long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector signed int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector signed int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); 
+#endif +} +static __inline__ vector unsigned int __ATTRS_o_ai +vec_pack(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector unsigned int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_pack(vector bool long long __a, vector bool long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector bool int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector float __ATTRS_o_ai +vec_pack(vector double __a, vector double __b) { + return (vector float) (__a[0], __a[1], __b[0], __b[1]); +} +#endif + +#ifdef __POWER9_VECTOR__ +static __inline__ vector unsigned short __ATTRS_o_ai +vec_pack_to_short_fp32(vector float __a, vector float __b) { + vector float __resa = __builtin_vsx_xvcvsphp(__a); + vector float __resb = __builtin_vsx_xvcvsphp(__b); +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned short)vec_mergee(__resa, __resb); +#else + return (vector unsigned short)vec_mergeo(__resa, __resb); +#endif +} + +#endif +/* vec_vpkuhum */ + +#define __builtin_altivec_vpkuhum vec_vpkuhum + +static __inline__ vector signed char __ATTRS_o_ai +vec_vpkuhum(vector signed short __a, vector signed short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector signed char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector signed char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector unsigned char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vpkuhum(vector bool short __a, vector bool short __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool char)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E)); +#else + return (vector bool char)vec_perm( + __a, __b, + (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +#endif +} + +/* vec_vpkuwum */ + +#define __builtin_altivec_vpkuwum vec_vpkuwum + +static __inline__ vector short __ATTRS_o_ai vec_vpkuwum(vector int __a, + vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 
0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vpkuwum(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector unsigned short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vpkuwum(vector bool int __a, vector bool int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D, + 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D)); +#else + return (vector bool short)vec_perm( + __a, __b, + (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +#endif +} + +/* vec_vpkudum */ + +#ifdef __POWER8_VECTOR__ +#define __builtin_altivec_vpkudum vec_vpkudum + +static __inline__ vector int __ATTRS_o_ai vec_vpkudum(vector long long __a, + vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)vec_perm( + __a, __b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector unsigned int)vec_perm( + __a, __b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vpkudum(vector bool long long __a, vector bool long long __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)vec_perm( + (vector long long)__a, (vector long long)__b, + (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector bool int)vec_perm( + (vector long long)__a, (vector long long)__b, + (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} +#endif + +/* vec_packpx */ + +static __inline__ vector pixel __attribute__((__always_inline__)) +vec_packpx(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector pixel)__builtin_altivec_vpkpx(__b, __a); +#else + return (vector pixel)__builtin_altivec_vpkpx(__a, __b); +#endif +} + +/* vec_vpkpx */ + +static __inline__ vector pixel __attribute__((__always_inline__)) +vec_vpkpx(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return (vector pixel)__builtin_altivec_vpkpx(__b, __a); +#else + return (vector 
pixel)__builtin_altivec_vpkpx(__a, __b); +#endif +} + +/* vec_packs */ + +static __inline__ vector signed char __ATTRS_o_ai vec_packs(vector short __a, + vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkshss(__b, __a); +#else + return __builtin_altivec_vpkshss(__a, __b); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_packs(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuhus(__b, __a); +#else + return __builtin_altivec_vpkuhus(__a, __b); +#endif +} + +static __inline__ vector signed short __ATTRS_o_ai vec_packs(vector int __a, + vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkswss(__b, __a); +#else + return __builtin_altivec_vpkswss(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_packs(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuwus(__b, __a); +#else + return __builtin_altivec_vpkuwus(__a, __b); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector int __ATTRS_o_ai vec_packs(vector long long __a, + vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdss(__b, __a); +#else + return __builtin_altivec_vpksdss(__a, __b); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_packs(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + +/* vec_vpkshss */ + +static __inline__ vector signed char __attribute__((__always_inline__)) +vec_vpkshss(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkshss(__b, __a); +#else + return __builtin_altivec_vpkshss(__a, __b); +#endif +} + +/* vec_vpksdss */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector int __ATTRS_o_ai vec_vpksdss(vector long long __a, + vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdss(__b, __a); +#else + return __builtin_altivec_vpksdss(__a, __b); +#endif +} +#endif + +/* vec_vpkuhus */ + +static __inline__ vector unsigned char __attribute__((__always_inline__)) +vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuhus(__b, __a); +#else + return __builtin_altivec_vpkuhus(__a, __b); +#endif +} + +/* vec_vpkudus */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + +/* vec_vpkswss */ + +static __inline__ vector signed short __attribute__((__always_inline__)) +vec_vpkswss(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkswss(__b, __a); +#else + return __builtin_altivec_vpkswss(__a, __b); +#endif +} + +/* vec_vpkuwus */ + +static __inline__ vector unsigned short __attribute__((__always_inline__)) +vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuwus(__b, __a); +#else + return __builtin_altivec_vpkuwus(__a, __b); +#endif +} + +/* vec_packsu */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_packsu(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ 
+ return __builtin_altivec_vpkshus(__b, __a); +#else + return __builtin_altivec_vpkshus(__a, __b); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_packsu(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuhus(__b, __a); +#else + return __builtin_altivec_vpkuhus(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_packsu(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkswus(__b, __a); +#else + return __builtin_altivec_vpkswus(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_packsu(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuwus(__b, __a); +#else + return __builtin_altivec_vpkuwus(__a, __b); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector unsigned int __ATTRS_o_ai +vec_packsu(vector long long __a, vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdus(__b, __a); +#else + return __builtin_altivec_vpksdus(__a, __b); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_packsu(vector unsigned long long __a, vector unsigned long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + +/* vec_vpkshus */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vpkshus(vector short __a, vector short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkshus(__b, __a); +#else + return __builtin_altivec_vpkshus(__a, __b); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vpkshus(vector unsigned short __a, vector unsigned short __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuhus(__b, __a); +#else + return __builtin_altivec_vpkuhus(__a, __b); +#endif +} + +/* vec_vpkswus */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vpkswus(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkswus(__b, __a); +#else + return __builtin_altivec_vpkswus(__a, __b); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vpkswus(vector unsigned int __a, vector unsigned int __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkuwus(__b, __a); +#else + return __builtin_altivec_vpkuwus(__a, __b); +#endif +} + +/* vec_vpksdus */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vpksdus(vector long long __a, vector long long __b) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdus(__b, __a); +#else + return __builtin_altivec_vpksdus(__a, __b); +#endif +} +#endif + +/* vec_perm */ + +// The vperm instruction is defined architecturally with a big-endian bias. +// For little endian, we swap the input operands and invert the permute +// control vector. Only the rightmost 5 bits matter, so we could use +// a vector of all 31s instead of all 255s to perform the inversion. +// However, when the PCV is not a constant, using 255 has an advantage +// in that the vec_xor can be recognized as a vec_nor (and for P8 and +// later, possibly a vec_nand). 
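
Editor's aside (illustrative only, not part of the patch): the comment above describes the little-endian vec_perm lowering — swap the two input operands and complement the permute control vector, relying on the fact that vperm reads only the low 5 bits of each control byte. The standalone C sketch below models that equivalence with plain byte arrays; `vperm_be` and the byte-reversal scaffolding are hypothetical helpers written for this demonstration and are not part of altivec.h.

#include <assert.h>
#include <stdint.h>

/* Model of the hardware vperm instruction in big-endian byte numbering:
 * result byte i selects byte (pcv[i] & 31) of the concatenation a:b,
 * where bytes 0..15 come from a and bytes 16..31 come from b. */
static void vperm_be(const uint8_t *a, const uint8_t *b,
                     const uint8_t *pcv, uint8_t *out) {
  for (int i = 0; i < 16; ++i) {
    unsigned idx = pcv[i] & 31;
    out[i] = idx < 16 ? a[idx] : b[idx - 16];
  }
}

int main(void) {
  uint8_t a[16], b[16], c[16], be[16], le[16];
  uint8_t ar[16], br[16], cr[16], tmp[16];
  int i;

  for (i = 0; i < 16; ++i) {
    a[i] = (uint8_t)i;                  /* first source vector, element order  */
    b[i] = (uint8_t)(100 + i);          /* second source vector, element order */
    c[i] = (uint8_t)((7 * i + 3) & 31); /* arbitrary permute control vector    */
  }

  /* Big-endian path: element order and register byte order coincide,
   * so vec_perm maps straight onto vperm. */
  vperm_be(a, b, c, be);

  /* Little-endian path, as in the lowering above: each register holds its
   * elements in reversed byte order, the operands are swapped, and the
   * control vector is XORed with all-ones (only the low 5 bits of each
   * control byte matter, so 255 behaves like 31 here). */
  for (i = 0; i < 16; ++i) {
    ar[i] = a[15 - i];
    br[i] = b[15 - i];
    cr[i] = (uint8_t)(c[15 - i] ^ 255);
  }
  vperm_be(br, ar, cr, tmp);   /* swapped operands, inverted control vector */
  for (i = 0; i < 16; ++i)
    le[i] = tmp[15 - i];       /* convert the result back to element order  */

  /* Both paths select element (c[i] & 31) of the concatenation a:b. */
  for (i = 0; i < 16; ++i)
    assert(le[i] == be[i]);
  return 0;
}
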
+ +static __inline__ vector signed char __ATTRS_o_ai vec_perm( + vector signed char __a, vector signed char __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_perm(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector unsigned char)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector unsigned char)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a, + vector signed short __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_perm(vector unsigned short __a, vector unsigned short __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector unsigned short)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector unsigned short)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai vec_perm( + vector bool short __a, vector bool short __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a, + vector pixel __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return 
(vector pixel)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a, + vector signed int __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d); +#else + return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_perm(vector unsigned int __a, vector unsigned int __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a, + vector float __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector float)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector float)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} + +#ifdef __VSX__ +static __inline__ vector long long __ATTRS_o_ai +vec_perm(vector signed long long __a, vector signed long long __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector signed long long)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector signed long long)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_perm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector unsigned long long)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector unsigned long long)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_perm(vector bool long long __a, vector bool long long __b, + vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector bool long 
long)__builtin_altivec_vperm_4si( + (vector int)__b, (vector int)__a, __d); +#else + return (vector bool long long)__builtin_altivec_vperm_4si( + (vector int)__a, (vector int)__b, __c); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_perm(vector double __a, vector double __b, vector unsigned char __c) { +#ifdef __LITTLE_ENDIAN__ + vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255}; + __d = vec_xor(__c, __d); + return (vector double)__builtin_altivec_vperm_4si((vector int)__b, + (vector int)__a, __d); +#else + return (vector double)__builtin_altivec_vperm_4si((vector int)__a, + (vector int)__b, __c); +#endif +} +#endif + +/* vec_vperm */ + +static __inline__ vector signed char __ATTRS_o_ai vec_vperm( + vector signed char __a, vector signed char __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vperm(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vperm( + vector bool char __a, vector bool char __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector short __ATTRS_o_ai +vec_vperm(vector short __a, vector short __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vperm(vector unsigned short __a, vector unsigned short __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_vperm( + vector bool short __a, vector bool short __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector pixel __ATTRS_o_ai +vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector int __ATTRS_o_ai vec_vperm(vector int __a, + vector int __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vperm(vector unsigned int __a, vector unsigned int __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector float __ATTRS_o_ai +vec_vperm(vector float __a, vector float __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +#ifdef __VSX__ +static __inline__ vector long long __ATTRS_o_ai vec_vperm( + vector long long __a, vector long long __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vperm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} + +static __inline__ vector double __ATTRS_o_ai +vec_vperm(vector double __a, vector double __b, vector unsigned char __c) { + return vec_perm(__a, __b, __c); +} +#endif + +/* vec_re */ + +static __inline__ vector float __ATTRS_o_ai vec_re(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvresp(__a); +#else + return __builtin_altivec_vrefp(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_re(vector double __a) { + return __builtin_vsx_xvredp(__a); +} +#endif + +/* vec_vrefp */ + +static __inline__ vector float 
__attribute__((__always_inline__)) +vec_vrefp(vector float __a) { + return __builtin_altivec_vrefp(__a); +} + +/* vec_rl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_rl(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_rl(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_rl(vector short __a, + vector unsigned short __b) { + return __builtin_altivec_vrlh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_rl(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_rl(vector int __a, + vector unsigned int __b) { + return __builtin_altivec_vrlw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_rl(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_rl(vector signed long long __a, vector unsigned long long __b) { + return __builtin_altivec_vrld(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_rl(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_altivec_vrld(__a, __b); +} +#endif + +/* vec_rlmi */ +#ifdef __POWER9_VECTOR__ +static __inline__ vector unsigned int __ATTRS_o_ai +vec_rlmi(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return __builtin_altivec_vrlwmi(__a, __c, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_rlmi(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned long long __c) { + return __builtin_altivec_vrldmi(__a, __c, __b); +} + +/* vec_rlnm */ +static __inline__ vector unsigned int __ATTRS_o_ai +vec_rlnm(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + vector unsigned int OneByte = { 0x8, 0x8, 0x8, 0x8 }; + return __builtin_altivec_vrlwnm(__a, ((__c << OneByte) | __b)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_rlnm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned long long __c) { + vector unsigned long long OneByte = { 0x8, 0x8 }; + return __builtin_altivec_vrldnm(__a, ((__c << OneByte) | __b)); +} +#endif + +/* vec_vrlb */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vrlb(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vrlb(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b); +} + +/* vec_vrlh */ + +static __inline__ vector short __ATTRS_o_ai +vec_vrlh(vector short __a, vector unsigned short __b) { + return __builtin_altivec_vrlh(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vrlh(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b); +} + +/* vec_vrlw */ + +static __inline__ vector int __ATTRS_o_ai vec_vrlw(vector int __a, + vector unsigned int __b) { + return 
__builtin_altivec_vrlw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vrlw(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b); +} + +/* vec_round */ + +static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvrspi(__a); +#else + return __builtin_altivec_vrfin(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) { + return __builtin_vsx_xvrdpi(__a); +} + +/* vec_rint */ + +static __inline__ vector float __ATTRS_o_ai vec_rint(vector float __a) { + return __builtin_vsx_xvrspic(__a); +} + +static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) { + return __builtin_vsx_xvrdpic(__a); +} + +/* vec_nearbyint */ + +static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) { + return __builtin_vsx_xvrspi(__a); +} + +static __inline__ vector double __ATTRS_o_ai vec_nearbyint(vector double __a) { + return __builtin_vsx_xvrdpi(__a); +} +#endif + +/* vec_vrfin */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrfin(vector float __a) { + return __builtin_altivec_vrfin(__a); +} + +/* vec_sqrt */ + +#ifdef __VSX__ +static __inline__ vector float __ATTRS_o_ai vec_sqrt(vector float __a) { + return __builtin_vsx_xvsqrtsp(__a); +} + +static __inline__ vector double __ATTRS_o_ai vec_sqrt(vector double __a) { + return __builtin_vsx_xvsqrtdp(__a); +} +#endif + +/* vec_rsqrte */ + +static __inline__ vector float __ATTRS_o_ai vec_rsqrte(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvrsqrtesp(__a); +#else + return __builtin_altivec_vrsqrtefp(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) { + return __builtin_vsx_xvrsqrtedp(__a); +} +#endif + +/* vec_vrsqrtefp */ + +static __inline__ __vector float __attribute__((__always_inline__)) +vec_vrsqrtefp(vector float __a) { + return __builtin_altivec_vrsqrtefp(__a); +} + +/* vec_sel */ + +#define __builtin_altivec_vsel_4si vec_sel + +static __inline__ vector signed char __ATTRS_o_ai vec_sel( + vector signed char __a, vector signed char __b, vector unsigned char __c) { + return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) { + return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sel(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai vec_sel( + vector unsigned char __a, vector unsigned char __b, vector bool char __c) { + return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) { + return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_sel(vector bool char __a, + vector bool char __b, + vector bool char __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a, + vector short __b, + vector unsigned short __c) { + return (__a & ~(vector short)__c) | (__b & (vector short)__c); +} + 
+static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a, + vector short __b, + vector bool short __c) { + return (__a & ~(vector short)__c) | (__b & (vector short)__c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sel(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sel(vector unsigned short __a, vector unsigned short __b, + vector bool short __c) { + return (__a & ~(vector unsigned short)__c) | + (__b & (vector unsigned short)__c); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_sel( + vector bool short __a, vector bool short __b, vector unsigned short __c) { + return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a, + vector int __b, + vector unsigned int __c) { + return (__a & ~(vector int)__c) | (__b & (vector int)__c); +} + +static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a, + vector int __b, + vector bool int __c) { + return (__a & ~(vector int)__c) | (__b & (vector int)__c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_sel( + vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) { + return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) { + return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_sel(vector bool int __a, + vector bool int __b, + vector bool int __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a, + vector float __b, + vector unsigned int __c) { + vector int __res = ((vector int)__a & ~(vector int)__c) | + ((vector int)__b & (vector int)__c); + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a, + vector float __b, + vector bool int __c) { + vector int __res = ((vector int)__a & ~(vector int)__c) | + ((vector int)__b & (vector int)__c); + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai +vec_sel(vector double __a, vector double __b, vector bool long long __c) { + vector long long __res = ((vector long long)__a & ~(vector long long)__c) | + ((vector long long)__b & (vector long long)__c); + return (vector double)__res; +} + +static __inline__ vector double __ATTRS_o_ai +vec_sel(vector double __a, vector double __b, vector unsigned long long __c) { + vector long long __res = ((vector long long)__a & ~(vector long long)__c) | + ((vector long long)__b & (vector long long)__c); + return (vector double)__res; +} +#endif + +/* vec_vsel */ + +static __inline__ vector signed char __ATTRS_o_ai vec_vsel( + vector signed char __a, vector signed char __b, vector unsigned char __c) { + return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsel(vector signed char __a, 
vector signed char __b, vector bool char __c) { + return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsel(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai vec_vsel( + vector unsigned char __a, vector unsigned char __b, vector bool char __c) { + return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsel(vector bool char __a, vector bool char __b, vector unsigned char __c) { + return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a, + vector bool char __b, + vector bool char __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector short __ATTRS_o_ai +vec_vsel(vector short __a, vector short __b, vector unsigned short __c) { + return (__a & ~(vector short)__c) | (__b & (vector short)__c); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsel(vector short __a, + vector short __b, + vector bool short __c) { + return (__a & ~(vector short)__c) | (__b & (vector short)__c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsel(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsel(vector unsigned short __a, vector unsigned short __b, + vector bool short __c) { + return (__a & ~(vector unsigned short)__c) | + (__b & (vector unsigned short)__c); +} + +static __inline__ vector bool short __ATTRS_o_ai vec_vsel( + vector bool short __a, vector bool short __b, vector unsigned short __c) { + return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsel(vector bool short __a, vector bool short __b, vector bool short __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a, + vector int __b, + vector unsigned int __c) { + return (__a & ~(vector int)__c) | (__b & (vector int)__c); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a, + vector int __b, + vector bool int __c) { + return (__a & ~(vector int)__c) | (__b & (vector int)__c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel( + vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel( + vector unsigned int __a, vector unsigned int __b, vector bool int __c) { + return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsel(vector bool int __a, vector bool int __b, vector unsigned int __c) { + return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a, + vector bool int __b, + vector bool int __c) { + return (__a & ~__c) | (__b & __c); +} + +static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a, + vector float __b, + vector unsigned int __c) { + vector int __res = ((vector int)__a & ~(vector int)__c) | + ((vector int)__b & (vector int)__c); + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai 
vec_vsel(vector float __a, + vector float __b, + vector bool int __c) { + vector int __res = ((vector int)__a & ~(vector int)__c) | + ((vector int)__b & (vector int)__c); + return (vector float)__res; +} + +/* vec_sl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sl(vector signed char __a, vector unsigned char __b) { + return __a << (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sl(vector unsigned char __a, vector unsigned char __b) { + return __a << __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_sl(vector short __a, + vector unsigned short __b) { + return __a << (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sl(vector unsigned short __a, vector unsigned short __b) { + return __a << __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_sl(vector int __a, + vector unsigned int __b) { + return __a << (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sl(vector unsigned int __a, vector unsigned int __b) { + return __a << __b; +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sl(vector signed long long __a, vector unsigned long long __b) { + return __a << (vector long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sl(vector unsigned long long __a, vector unsigned long long __b) { + return __a << __b; +} +#endif + +/* vec_vslb */ + +#define __builtin_altivec_vslb vec_vslb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vslb(vector signed char __a, vector unsigned char __b) { + return vec_sl(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vslb(vector unsigned char __a, vector unsigned char __b) { + return vec_sl(__a, __b); +} + +/* vec_vslh */ + +#define __builtin_altivec_vslh vec_vslh + +static __inline__ vector short __ATTRS_o_ai +vec_vslh(vector short __a, vector unsigned short __b) { + return vec_sl(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vslh(vector unsigned short __a, vector unsigned short __b) { + return vec_sl(__a, __b); +} + +/* vec_vslw */ + +#define __builtin_altivec_vslw vec_vslw + +static __inline__ vector int __ATTRS_o_ai vec_vslw(vector int __a, + vector unsigned int __b) { + return vec_sl(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vslw(vector unsigned int __a, vector unsigned int __b) { + return vec_sl(__a, __b); +} + +/* vec_sld */ + +#define __builtin_altivec_vsldoi_4si vec_sld + +static __inline__ vector signed char __ATTRS_o_ai vec_sld( + vector signed char __a, vector signed char __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sld(vector unsigned char __a, vector unsigned char __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 
27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sld(vector bool char __a, vector bool char __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector signed short __ATTRS_o_ai vec_sld( + vector signed short __a, vector signed short __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sld(vector unsigned short __a, vector unsigned short __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sld(vector bool short __a, vector bool short __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sld(vector pixel __a, + vector pixel __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector signed int __ATTRS_o_ai 
+vec_sld(vector signed int __a, vector signed int __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_sld( + vector unsigned int __a, vector unsigned int __b, unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai vec_sld(vector bool int __a, + vector bool int __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a, + vector float __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_sld(vector bool long long __a, vector bool long long __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_sld(vector signed long long __a, vector signed long long __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - 
__d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sld(vector unsigned long long __a, vector unsigned long long __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a, + vector double __b, + unsigned const int __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} +#endif + +/* vec_sldw */ +static __inline__ vector signed char __ATTRS_o_ai vec_sldw( + vector signed char __a, vector signed char __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sldw(vector unsigned char __a, vector unsigned char __b, + unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector signed short __ATTRS_o_ai vec_sldw( + vector signed short __a, vector signed short __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sldw(vector unsigned short __a, vector unsigned short __b, + unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw( + vector unsigned int __a, vector unsigned int __b, unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sldw(vector signed long long __a, vector signed long long __b, + unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sldw(vector unsigned long long __a, vector unsigned long long __b, + unsigned const int __c) { + return vec_sld(__a, __b, ((__c << 2) & 0x0F)); +} +#endif + +#ifdef __POWER9_VECTOR__ +/* vec_slv */ +static __inline__ vector unsigned char __ATTRS_o_ai +vec_slv(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vslv(__a, __b); +} + +/* vec_srv */ +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srv(vector unsigned char __a, vector unsigned char __b) { + 
return __builtin_altivec_vsrv(__a, __b); +} +#endif + +/* vec_vsldoi */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned char __ATTRS_o_ai vec_vsldoi( + vector unsigned char __a, vector unsigned char __b, unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector short __ATTRS_o_ai vec_vsldoi(vector short __a, + vector short __b, + unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned short __ATTRS_o_ai vec_vsldoi( + vector unsigned short __a, vector unsigned short __b, unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a, + vector pixel __b, + unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_vsldoi(vector int __a, + vector int __b, + unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - 
__d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_vsldoi( + vector unsigned int __a, vector unsigned int __b, unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +static __inline__ vector float __ATTRS_o_ai vec_vsldoi(vector float __a, + vector float __b, + unsigned char __c) { + unsigned char __d = __c & 0x0F; +#ifdef __LITTLE_ENDIAN__ + return vec_perm( + __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d, + 20 - __d, 21 - __d, 22 - __d, 23 - __d, + 24 - __d, 25 - __d, 26 - __d, 27 - __d, + 28 - __d, 29 - __d, 30 - __d, 31 - __d)); +#else + return vec_perm( + __a, __b, + (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5, + __d + 6, __d + 7, __d + 8, __d + 9, __d + 10, + __d + 11, __d + 12, __d + 13, __d + 14, __d + 15)); +#endif +} + +/* vec_sll */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sll(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sll(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sll(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char __a, vector unsigned short __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char __a, vector unsigned int __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sll(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sll(vector bool char __a, vector unsigned short __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_sll(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a, + vector 
unsigned char __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a, + vector unsigned short __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a, + vector unsigned int __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sll(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sll(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_sll(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a, + vector unsigned short __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a, + vector unsigned int __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a, + vector unsigned short __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a, + vector unsigned int __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_sll(vector bool int __a, vector unsigned char __b) { + return (vector bool 
int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_sll(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_sll(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sll(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sll(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} +#endif + +/* vec_vsl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsl(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsl(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsl(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char __a, vector unsigned short __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char __a, vector unsigned int __b) { + return (vector unsigned char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsl(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsl(vector bool char __a, vector unsigned short __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsl(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a, + vector unsigned short __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a, + vector unsigned int __b) { + return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector 
int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsl(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsl(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsl(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a, + vector unsigned short __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a, + vector unsigned int __b) { + return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a, + vector unsigned short __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a, + vector unsigned int __b) { + return (vector int)__builtin_altivec_vsl(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsl(vector bool int __a, vector unsigned char __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsl(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsl(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vsl((vector int)__a, + (vector int)__b); +} + +/* vec_slo */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_slo(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static 
__inline__ vector signed char __ATTRS_o_ai +vec_slo(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_slo(vector unsigned char __a, vector signed char __b) { + return (vector unsigned char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_slo(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a, + vector signed char __b) { + return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_slo(vector unsigned short __a, vector signed char __b) { + return (vector unsigned short)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_slo(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a, + vector signed char __b) { + return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a, + vector signed char __b) { + return (vector int)__builtin_altivec_vslo(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vslo(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_slo(vector unsigned int __a, vector signed char __b) { + return (vector unsigned int)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_slo(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a, + vector signed char __b) { + return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a, + vector unsigned char __b) { + return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_slo(vector signed long long __a, vector signed char __b) { + return (vector signed long long)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_slo(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_slo(vector unsigned long long __a, vector signed char __b) { + return (vector unsigned long 
long)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_slo(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} +#endif + +/* vec_vslo */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vslo(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vslo(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vslo(vector unsigned char __a, vector signed char __b) { + return (vector unsigned char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vslo(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a, + vector signed char __b) { + return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vslo(vector unsigned short __a, vector signed char __b) { + return (vector unsigned short)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vslo(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a, + vector signed char __b) { + return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a, + vector signed char __b) { + return (vector int)__builtin_altivec_vslo(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vslo(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vslo(vector unsigned int __a, vector signed char __b) { + return (vector unsigned int)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vslo(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vslo((vector int)__a, + (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a, + vector signed char __b) { + return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a, + vector unsigned char __b) { + return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b); +} + +/* vec_splat */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_splat(vector 
signed char __a, unsigned const int __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_splat(vector unsigned char __a, unsigned const int __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_splat(vector bool char __a, unsigned const int __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F)); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_splat(vector signed short __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x07) * 2; + unsigned char b1 = b0 + 1; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, + b0, b1, b0, b1, b0, b1)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_splat(vector unsigned short __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x07) * 2; + unsigned char b1 = b0 + 1; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, + b0, b1, b0, b1, b0, b1)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_splat(vector bool short __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x07) * 2; + unsigned char b1 = b0 + 1; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, + b0, b1, b0, b1, b0, b1)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_splat(vector pixel __a, + unsigned const int __b) { + unsigned char b0 = (__b & 0x07) * 2; + unsigned char b1 = b0 + 1; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, + b0, b1, b0, b1, b0, b1)); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_splat(vector signed int __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x03) * 4; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, + b2, b3, b0, b1, b2, b3)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_splat(vector unsigned int __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x03) * 4; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, + b2, b3, b0, b1, b2, b3)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_splat(vector bool int __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x03) * 4; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, + b2, b3, b0, b1, b2, b3)); +} + +static __inline__ vector float __ATTRS_o_ai vec_splat(vector float __a, + unsigned const int __b) { + unsigned char b0 = (__b & 0x03) * 4; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, + b2, b3, b0, b1, b2, b3)); +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_splat(vector double __a, + unsigned const int __b) { + unsigned char b0 = (__b & 0x01) * 8; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5, + b6 = b0 + 6, b7 = b0 + 7; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1, + b2, b3, b4, b5, b6, b7)); +} +static __inline__ vector bool long long __ATTRS_o_ai +vec_splat(vector bool long long __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x01) * 8; 
+ unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5, + b6 = b0 + 6, b7 = b0 + 7; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1, + b2, b3, b4, b5, b6, b7)); +} +static __inline__ vector signed long long __ATTRS_o_ai +vec_splat(vector signed long long __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x01) * 8; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5, + b6 = b0 + 6, b7 = b0 + 7; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1, + b2, b3, b4, b5, b6, b7)); +} +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_splat(vector unsigned long long __a, unsigned const int __b) { + unsigned char b0 = (__b & 0x01) * 8; + unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5, + b6 = b0 + 6, b7 = b0 + 7; + return vec_perm(__a, __a, + (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1, + b2, b3, b4, b5, b6, b7)); +} +#endif + +/* vec_vspltb */ + +#define __builtin_altivec_vspltb vec_vspltb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vspltb(vector signed char __a, unsigned char __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vspltb(vector unsigned char __a, unsigned char __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a, + unsigned char __b) { + return vec_perm(__a, __a, (vector unsigned char)(__b)); +} + +/* vec_vsplth */ + +#define __builtin_altivec_vsplth vec_vsplth + +static __inline__ vector short __ATTRS_o_ai vec_vsplth(vector short __a, + unsigned char __b) { + __b *= 2; + unsigned char b1 = __b + 1; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1, + __b, b1, __b, b1, __b, b1, __b, b1)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsplth(vector unsigned short __a, unsigned char __b) { + __b *= 2; + unsigned char b1 = __b + 1; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1, + __b, b1, __b, b1, __b, b1, __b, b1)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsplth(vector bool short __a, unsigned char __b) { + __b *= 2; + unsigned char b1 = __b + 1; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1, + __b, b1, __b, b1, __b, b1, __b, b1)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a, + unsigned char __b) { + __b *= 2; + unsigned char b1 = __b + 1; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1, + __b, b1, __b, b1, __b, b1, __b, b1)); +} + +/* vec_vspltw */ + +#define __builtin_altivec_vspltw vec_vspltw + +static __inline__ vector int __ATTRS_o_ai vec_vspltw(vector int __a, + unsigned char __b) { + __b *= 4; + unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b, + b1, b2, b3, __b, b1, b2, b3)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vspltw(vector unsigned int __a, unsigned char __b) { + __b *= 4; + unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b, + b1, b2, b3, __b, b1, b2, b3)); +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a, + 
unsigned char __b) { + __b *= 4; + unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b, + b1, b2, b3, __b, b1, b2, b3)); +} + +static __inline__ vector float __ATTRS_o_ai vec_vspltw(vector float __a, + unsigned char __b) { + __b *= 4; + unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3; + return vec_perm(__a, __a, + (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b, + b1, b2, b3, __b, b1, b2, b3)); +} + +/* vec_splat_s8 */ + +#define __builtin_altivec_vspltisb vec_splat_s8 + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector signed char __ATTRS_o_ai +vec_splat_s8(signed char __a) { + return (vector signed char)(__a); +} + +/* vec_vspltisb */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector signed char __ATTRS_o_ai +vec_vspltisb(signed char __a) { + return (vector signed char)(__a); +} + +/* vec_splat_s16 */ + +#define __builtin_altivec_vspltish vec_splat_s16 + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector short __ATTRS_o_ai vec_splat_s16(signed char __a) { + return (vector short)(__a); +} + +/* vec_vspltish */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector short __ATTRS_o_ai vec_vspltish(signed char __a) { + return (vector short)(__a); +} + +/* vec_splat_s32 */ + +#define __builtin_altivec_vspltisw vec_splat_s32 + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector int __ATTRS_o_ai vec_splat_s32(signed char __a) { + return (vector int)(__a); +} + +/* vec_vspltisw */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector int __ATTRS_o_ai vec_vspltisw(signed char __a) { + return (vector int)(__a); +} + +/* vec_splat_u8 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector unsigned char __ATTRS_o_ai +vec_splat_u8(unsigned char __a) { + return (vector unsigned char)(__a); +} + +/* vec_splat_u16 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector unsigned short __ATTRS_o_ai +vec_splat_u16(signed char __a) { + return (vector unsigned short)(__a); +} + +/* vec_splat_u32 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static __inline__ vector unsigned int __ATTRS_o_ai +vec_splat_u32(signed char __a) { + return (vector unsigned int)(__a); +} + +/* vec_sr */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sr(vector signed char __a, vector unsigned char __b) { + vector unsigned char __res = (vector unsigned char)__a >> __b; + return (vector signed char)__res; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sr(vector unsigned char __a, vector unsigned char __b) { + return __a >> __b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_sr(vector signed short __a, vector unsigned short __b) { + vector unsigned short __res = (vector unsigned short)__a >> __b; + return (vector signed short)__res; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sr(vector unsigned short __a, vector unsigned short __b) { + return __a >> __b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_sr(vector signed int __a, vector unsigned int __b) { + vector unsigned int __res = (vector unsigned int)__a >> __b; + return (vector signed int)__res; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sr(vector unsigned 
int __a, vector unsigned int __b) { + return __a >> __b; +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sr(vector signed long long __a, vector unsigned long long __b) { + vector unsigned long long __res = (vector unsigned long long)__a >> __b; + return (vector signed long long)__res; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sr(vector unsigned long long __a, vector unsigned long long __b) { + return __a >> __b; +} +#endif + +/* vec_vsrb */ + +#define __builtin_altivec_vsrb vec_vsrb + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsrb(vector signed char __a, vector unsigned char __b) { + return __a >> (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsrb(vector unsigned char __a, vector unsigned char __b) { + return __a >> __b; +} + +/* vec_vsrh */ + +#define __builtin_altivec_vsrh vec_vsrh + +static __inline__ vector short __ATTRS_o_ai +vec_vsrh(vector short __a, vector unsigned short __b) { + return __a >> (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsrh(vector unsigned short __a, vector unsigned short __b) { + return __a >> __b; +} + +/* vec_vsrw */ + +#define __builtin_altivec_vsrw vec_vsrw + +static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a, + vector unsigned int __b) { + return __a >> (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsrw(vector unsigned int __a, vector unsigned int __b) { + return __a >> __b; +} + +/* vec_sra */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sra(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sra(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sra(vector short __a, + vector unsigned short __b) { + return __builtin_altivec_vsrah(__a, (vector unsigned short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sra(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sra(vector int __a, + vector unsigned int __b) { + return __builtin_altivec_vsraw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sra(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sra(vector signed long long __a, vector unsigned long long __b) { + return __a >> __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sra(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)((vector signed long long)__a >> __b); +} +#endif + +/* vec_vsrab */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsrab(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsrab(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b); +} + +/* vec_vsrah */ + +static __inline__ vector short 
__ATTRS_o_ai +vec_vsrah(vector short __a, vector unsigned short __b) { + return __builtin_altivec_vsrah(__a, (vector unsigned short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsrah(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b); +} + +/* vec_vsraw */ + +static __inline__ vector int __ATTRS_o_ai vec_vsraw(vector int __a, + vector unsigned int __b) { + return __builtin_altivec_vsraw(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsraw(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b); +} + +/* vec_srl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_srl(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_srl(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_srl(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char __a, vector unsigned short __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char __a, vector unsigned int __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_srl(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_srl(vector bool char __a, vector unsigned short __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_srl(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a, + vector unsigned short __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a, + vector unsigned int __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector 
int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_srl(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_srl(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_srl(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a, + vector unsigned short __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a, + vector unsigned int __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a, + vector unsigned short __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a, + vector unsigned int __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_srl(vector bool int __a, vector unsigned char __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_srl(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_srl(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_srl(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_srl(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a, + (vector 
int)__b); +} +#endif + +/* vec_vsr */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsr(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsr(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsr(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char __a, vector unsigned short __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char __a, vector unsigned int __b) { + return (vector unsigned char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsr(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsr(vector bool char __a, vector unsigned short __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsr(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a, + vector unsigned short __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a, + vector unsigned int __b) { + return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsr(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsr(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool short __ATTRS_o_ai 
+vec_vsr(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a, + vector unsigned short __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a, + vector unsigned int __b) { + return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a, + vector unsigned short __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a, + vector unsigned int __b) { + return (vector int)__builtin_altivec_vsr(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsr(vector bool int __a, vector unsigned char __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsr(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsr(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_altivec_vsr((vector int)__a, + (vector int)__b); +} + +/* vec_sro */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sro(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sro(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sro(vector unsigned char __a, vector signed char __b) { + return (vector unsigned char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sro(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a, + vector signed char __b) { + return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a, + vector unsigned char 
__b) { + return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sro(vector unsigned short __a, vector signed char __b) { + return (vector unsigned short)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sro(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a, + vector signed char __b) { + return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a, + vector signed char __b) { + return (vector int)__builtin_altivec_vsro(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsro(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sro(vector unsigned int __a, vector signed char __b) { + return (vector unsigned int)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sro(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a, + vector signed char __b) { + return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a, + vector unsigned char __b) { + return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sro(vector signed long long __a, vector signed char __b) { + return (vector signed long long)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_sro(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sro(vector unsigned long long __a, vector signed char __b) { + return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sro(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} +#endif + +/* vec_vsro */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsro(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsro(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsro(vector unsigned char __a, vector signed char __b) { + return (vector unsigned char)__builtin_altivec_vsro((vector int)__a, + 
(vector int)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsro(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a, + vector signed char __b) { + return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a, + vector unsigned char __b) { + return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsro(vector unsigned short __a, vector signed char __b) { + return (vector unsigned short)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsro(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a, + vector signed char __b) { + return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a, + vector unsigned char __b) { + return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a, + vector signed char __b) { + return (vector int)__builtin_altivec_vsro(__a, (vector int)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a, + vector unsigned char __b) { + return (vector int)__builtin_altivec_vsro(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsro(vector unsigned int __a, vector signed char __b) { + return (vector unsigned int)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsro(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_altivec_vsro((vector int)__a, + (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a, + vector signed char __b) { + return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a, + vector unsigned char __b) { + return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b); +} + +/* vec_st */ + +static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b, + vector signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b, + signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b, + vector unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b, + signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void 
__ATTRS_o_ai vec_st(vector bool char __a, int __b, + vector bool char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b, + vector short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b, + vector unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b, + vector bool short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b, + vector pixel *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b, + vector int *__c) { + __builtin_altivec_stvx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) { + __builtin_altivec_stvx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b, + vector unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b, + int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b, + vector bool int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b, + vector float *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b, + float *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +/* vec_stvx */ + +static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b, + vector signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b, + signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b, + vector unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static 
__inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b, + signed char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b, + vector bool char *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b, + vector short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b, + vector unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b, + vector bool short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b, + short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b, + vector pixel *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b, + vector int *__c) { + __builtin_altivec_stvx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b, + int *__c) { + __builtin_altivec_stvx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b, + vector unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b, + int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b, + vector bool int *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b, + vector float *__c) { + __builtin_altivec_stvx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b, + float *__c) { + __builtin_altivec_stvx((vector int)__a, 
__b, __c); +} + +/* vec_ste */ + +static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, int __b, + signed char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b, + signed char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, int __b, + short *__c) { + __builtin_altivec_stvehx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b, + short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b, + short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) { + __builtin_altivec_stvewx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b, + int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, int __b, + float *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +/* vec_stvebx */ + +static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b, + signed char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a, + int __b, unsigned char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b, + signed char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvebx((vector char)__a, __b, __c); +} + +/* vec_stvehx */ + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, int __b, + short *__c) { + __builtin_altivec_stvehx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a, + int __b, unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b, + short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void 
__ATTRS_o_ai vec_stvehx(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b, + short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvehx((vector short)__a, __b, __c); +} + +/* vec_stvewx */ + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, int __b, + int *__c) { + __builtin_altivec_stvewx(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b, + int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, int __b, + float *__c) { + __builtin_altivec_stvewx((vector int)__a, __b, __c); +} + +/* vec_stl */ + +static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b, + vector signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b, + signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b, + vector unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b, + signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b, + vector bool char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b, + vector short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b, + vector unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b, + vector bool short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai 
vec_stl(vector pixel __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b, + vector pixel *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, + vector int *__c) { + __builtin_altivec_stvxl(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) { + __builtin_altivec_stvxl(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b, + vector unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, + int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b, + vector bool int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b, + vector float *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b, + float *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +/* vec_stvxl */ + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b, + vector signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b, + signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b, + vector unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b, + signed char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b, + vector bool char *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b, + vector short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a, + int __b, unsigned short *__c) { + 
__builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b, + vector bool short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, + short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, + unsigned short *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b, + vector pixel *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, + vector int *__c) { + __builtin_altivec_stvxl(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b, + int *__c) { + __builtin_altivec_stvxl(__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b, + vector unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b, + int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b, + vector bool int *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b, + vector float *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b, + float *__c) { + __builtin_altivec_stvxl((vector int)__a, __b, __c); +} + +/* vec_sub */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_sub(vector signed char __a, vector signed char __b) { + return __a - __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sub(vector bool char __a, vector signed char __b) { + return (vector signed char)__a - __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_sub(vector signed char __a, vector bool char __b) { + return __a - (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sub(vector unsigned char __a, vector unsigned char __b) { + return __a - __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sub(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a - __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_sub(vector unsigned char __a, vector bool char __b) { + return __a - (vector unsigned char)__b; +} + +static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a, + vector short __b) { + return __a - __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_sub(vector bool short __a, + vector short __b) { + return (vector short)__a - __b; +} 
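For context only (illustrative, not part of the patched header): the vec_sub overloads in this hunk lower to plain element-wise modulo subtraction, while the vec_subs family further down in the same hunk saturates on overflow instead of wrapping. A minimal usage sketch, assuming a PowerPC target compiled with AltiVec enabled; the variable names and values are illustrative:

#include <altivec.h>
#include <stdio.h>

int main(void) {
  vector signed char a = {-128, 10, 20, 30, 40, 50, 60, 70,
                          80, 90, 100, 110, 120, 125, 126, 127};
  vector signed char b = {1, 1, 1, 1, 1, 1, 1, 1,
                          1, 1, 1, 1, 1, 1, 1, 1};
  vector signed char wrap = vec_sub(a, b);   /* -128 - 1 wraps around to 127 */
  vector signed char sat  = vec_subs(a, b);  /* -128 - 1 saturates at -128   */
  printf("%d %d\n", wrap[0], sat[0]);        /* expected: 127 -128           */
  return 0;
}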
+ +static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a, + vector bool short __b) { + return __a - (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sub(vector unsigned short __a, vector unsigned short __b) { + return __a - __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sub(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a - __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_sub(vector unsigned short __a, vector bool short __b) { + return __a - (vector unsigned short)__b; +} + +static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a, + vector int __b) { + return __a - __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_sub(vector bool int __a, + vector int __b) { + return (vector int)__a - __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a, + vector bool int __b) { + return __a - (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sub(vector unsigned int __a, vector unsigned int __b) { + return __a - __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sub(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a - __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sub(vector unsigned int __a, vector bool int __b) { + return __a - (vector unsigned int)__b; +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_sub(vector signed __int128 __a, vector signed __int128 __b) { + return __a - __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a - __b; +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_sub(vector signed long long __a, vector signed long long __b) { + return __a - __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_sub(vector unsigned long long __a, vector unsigned long long __b) { + return __a - __b; +} + +static __inline__ vector double __ATTRS_o_ai vec_sub(vector double __a, + vector double __b) { + return __a - __b; +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_sub(vector float __a, + vector float __b) { + return __a - __b; +} + +/* vec_vsububm */ + +#define __builtin_altivec_vsububm vec_vsububm + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsububm(vector signed char __a, vector signed char __b) { + return __a - __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsububm(vector bool char __a, vector signed char __b) { + return (vector signed char)__a - __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsububm(vector signed char __a, vector bool char __b) { + return __a - (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububm(vector unsigned char __a, vector unsigned char __b) { + return __a - __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububm(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a - __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububm(vector unsigned char __a, vector bool char __b) { + return __a - (vector unsigned char)__b; +} + +/* vec_vsubuhm */ + +#define __builtin_altivec_vsubuhm vec_vsubuhm + +static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short 
__a, + vector short __b) { + return __a - __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a, + vector short __b) { + return (vector short)__a - __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a, + vector bool short __b) { + return __a - (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) { + return __a - __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhm(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a - __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhm(vector unsigned short __a, vector bool short __b) { + return __a - (vector unsigned short)__b; +} + +/* vec_vsubuwm */ + +#define __builtin_altivec_vsubuwm vec_vsubuwm + +static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a, + vector int __b) { + return __a - __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a, + vector int __b) { + return (vector int)__a - __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a, + vector bool int __b) { + return __a - (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuwm(vector unsigned int __a, vector unsigned int __b) { + return __a - __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuwm(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a - __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuwm(vector unsigned int __a, vector bool int __b) { + return __a - (vector unsigned int)__b; +} + +/* vec_vsubfp */ + +#define __builtin_altivec_vsubfp vec_vsubfp + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vsubfp(vector float __a, vector float __b) { + return __a - __b; +} + +/* vec_subc */ + +static __inline__ vector signed int __ATTRS_o_ai +vec_subc(vector signed int __a, vector signed int __b) { + return (vector signed int)__builtin_altivec_vsubcuw((vector unsigned int)__a, + (vector unsigned int) __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subc(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vsubcuw(__a, __b); +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vsubcuq(__a, __b); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_subc(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vsubcuq(__a, __b); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + +/* vec_vsubcuw */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vsubcuw(__a, __b); +} + +/* vec_subs */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_subs(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vsubsbs(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_subs(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vsubsbs((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_subs(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vsubsbs(__a, (vector signed 
char)__b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_subs(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vsububs(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_subs(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vsububs((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_subs(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vsububs(__a, (vector unsigned char)__b); +} + +static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a, + vector short __b) { + return __builtin_altivec_vsubshs(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_subs(vector bool short __a, + vector short __b) { + return __builtin_altivec_vsubshs((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a, + vector bool short __b) { + return __builtin_altivec_vsubshs(__a, (vector short)__b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_subs(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vsubuhs(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_subs(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_subs(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b); +} + +static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a, + vector int __b) { + return __builtin_altivec_vsubsws(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_subs(vector bool int __a, + vector int __b) { + return __builtin_altivec_vsubsws((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a, + vector bool int __b) { + return __builtin_altivec_vsubsws(__a, (vector int)__b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subs(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vsubuws(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subs(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vsubuws((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subs(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b); +} + +/* vec_vsubsbs */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsubsbs(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vsubsbs(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsubsbs(vector bool char __a, vector signed char __b) { + return __builtin_altivec_vsubsbs((vector signed char)__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsubsbs(vector signed char __a, vector bool char __b) { + return __builtin_altivec_vsubsbs(__a, (vector signed char)__b); +} + +/* vec_vsububs */ + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububs(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vsububs(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsububs(vector bool char __a, vector unsigned char __b) { + return __builtin_altivec_vsububs((vector unsigned char)__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai 
+vec_vsububs(vector unsigned char __a, vector bool char __b) { + return __builtin_altivec_vsububs(__a, (vector unsigned char)__b); +} + +/* vec_vsubshs */ + +static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a, + vector short __b) { + return __builtin_altivec_vsubshs(__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a, + vector short __b) { + return __builtin_altivec_vsubshs((vector short)__a, __b); +} + +static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a, + vector bool short __b) { + return __builtin_altivec_vsubshs(__a, (vector short)__b); +} + +/* vec_vsubuhs */ + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_vsubuhs(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhs(vector bool short __a, vector unsigned short __b) { + return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsubuhs(vector unsigned short __a, vector bool short __b) { + return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b); +} + +/* vec_vsubsws */ + +static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a, + vector int __b) { + return __builtin_altivec_vsubsws(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a, + vector int __b) { + return __builtin_altivec_vsubsws((vector int)__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a, + vector bool int __b) { + return __builtin_altivec_vsubsws(__a, (vector int)__b); +} + +/* vec_vsubuws */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuws(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_vsubuws(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuws(vector bool int __a, vector unsigned int __b) { + return __builtin_altivec_vsubuws((vector unsigned int)__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsubuws(vector unsigned int __a, vector bool int __b) { + return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b); +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +/* vec_vsubuqm */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) { + return __a - __b; +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __a - __b; +} + +/* vec_vsubeuqm */ + + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_sube(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +/* vec_vsubcuq */ 
+ +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vsubcuq(__a, __b); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vsubcuq(__a, __b); +} + +/* vec_vsubecuq */ + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vsubecuq(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vsubecuq(__a, __b, __c); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_subec(vector signed int __a, vector signed int __b, + vector signed int __c) { + return vec_addec(__a, ~__b, __c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_subec(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return vec_addec(__a, ~__b, __c); +} + +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_subec(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vsubecuq(__a, __b, __c); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vsubecuq(__a, __b, __c); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + +static __inline__ vector signed int __ATTRS_o_ai +vec_sube(vector signed int __a, vector signed int __b, + vector signed int __c) { + vector signed int __mask = {1, 1, 1, 1}; + vector signed int __carry = __c & __mask; + return vec_adde(__a, ~__b, __carry); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sube(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + vector unsigned int __mask = {1, 1, 1, 1}; + vector unsigned int __carry = __c & __mask; + return vec_adde(__a, ~__b, __carry); +} +/* vec_sum4s */ + +static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a, + vector int __b) { + return __builtin_altivec_vsum4sbs(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_sum4s(vector unsigned char __a, vector unsigned int __b) { + return __builtin_altivec_vsum4ubs(__a, __b); +} + +static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed short __a, + vector int __b) { + return __builtin_altivec_vsum4shs(__a, __b); +} + +/* vec_vsum4sbs */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vsum4sbs(vector signed char __a, vector int __b) { + return __builtin_altivec_vsum4sbs(__a, __b); +} + +/* vec_vsum4ubs */ + +static __inline__ vector unsigned int __attribute__((__always_inline__)) +vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) { + return __builtin_altivec_vsum4ubs(__a, __b); +} + +/* vec_vsum4shs */ + +static __inline__ vector int __attribute__((__always_inline__)) +vec_vsum4shs(vector signed short __a, vector int __b) { + return __builtin_altivec_vsum4shs(__a, __b); +} + +/* vec_sum2s */ + +/* The vsum2sws instruction has a big-endian bias, so that the second + input vector and the result always reference big-endian elements + 1 and 3 (little-endian element 0 and 2). 
For ease of porting the + programmer wants elements 1 and 3 in both cases, so for little + endian we must perform some permutes. */ + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_sum2s(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + vector int __c = (vector signed int)vec_perm( + __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, + 8, 9, 10, 11)); + __c = __builtin_altivec_vsum2sws(__a, __c); + return (vector signed int)vec_perm( + __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, + 8, 9, 10, 11)); +#else + return __builtin_altivec_vsum2sws(__a, __b); +#endif +} + +/* vec_vsum2sws */ + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_vsum2sws(vector int __a, vector int __b) { +#ifdef __LITTLE_ENDIAN__ + vector int __c = (vector signed int)vec_perm( + __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, + 8, 9, 10, 11)); + __c = __builtin_altivec_vsum2sws(__a, __c); + return (vector signed int)vec_perm( + __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, + 8, 9, 10, 11)); +#else + return __builtin_altivec_vsum2sws(__a, __b); +#endif +} + +/* vec_sums */ + +/* The vsumsws instruction has a big-endian bias, so that the second + input vector and the result always reference big-endian element 3 + (little-endian element 0). For ease of porting the programmer + wants element 3 in both cases, so for little endian we must perform + some permutes. */ + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_sums(vector signed int __a, vector signed int __b) { +#ifdef __LITTLE_ENDIAN__ + __b = (vector signed int)vec_splat(__b, 3); + __b = __builtin_altivec_vsumsws(__a, __b); + return (vector signed int)(0, 0, 0, __b[0]); +#else + return __builtin_altivec_vsumsws(__a, __b); +#endif +} + +/* vec_vsumsws */ + +static __inline__ vector signed int __attribute__((__always_inline__)) +vec_vsumsws(vector signed int __a, vector signed int __b) { +#ifdef __LITTLE_ENDIAN__ + __b = (vector signed int)vec_splat(__b, 3); + __b = __builtin_altivec_vsumsws(__a, __b); + return (vector signed int)(0, 0, 0, __b[0]); +#else + return __builtin_altivec_vsumsws(__a, __b); +#endif +} + +/* vec_trunc */ + +static __inline__ vector float __ATTRS_o_ai vec_trunc(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvrspiz(__a); +#else + return __builtin_altivec_vrfiz(__a); +#endif +} + +#ifdef __VSX__ +static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) { + return __builtin_vsx_xvrdpiz(__a); +} +#endif + +/* vec_vrfiz */ + +static __inline__ vector float __attribute__((__always_inline__)) +vec_vrfiz(vector float __a) { + return __builtin_altivec_vrfiz(__a); +} + +/* vec_unpackh */ + +/* The vector unpack instructions all have a big-endian bias, so for + little endian we must reverse the meanings of "high" and "low." 
*/ + +static __inline__ vector short __ATTRS_o_ai +vec_unpackh(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsb((vector char)__a); +#else + return __builtin_altivec_vupkhsb((vector char)__a); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_unpackh(vector bool char __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)__builtin_altivec_vupklsb((vector char)__a); +#else + return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_unpackh(vector short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsh(__a); +#else + return __builtin_altivec_vupkhsh(__a); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_unpackh(vector bool short __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)__builtin_altivec_vupklsh((vector short)__a); +#else + return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unpackh(vector pixel __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a); +#else + return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector long long __ATTRS_o_ai vec_unpackh(vector int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsw(__a); +#else + return __builtin_altivec_vupkhsw(__a); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_unpackh(vector bool int __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_unpackh(vector float __a) { + return (vector double)(__a[0], __a[1]); +} +#endif + +/* vec_vupkhsb */ + +static __inline__ vector short __ATTRS_o_ai +vec_vupkhsb(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsb((vector char)__a); +#else + return __builtin_altivec_vupkhsb((vector char)__a); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vupkhsb(vector bool char __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)__builtin_altivec_vupklsb((vector char)__a); +#else + return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a); +#endif +} + +/* vec_vupkhsh */ + +static __inline__ vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsh(__a); +#else + return __builtin_altivec_vupkhsh(__a); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vupkhsh(vector bool short __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)__builtin_altivec_vupklsh((vector short)__a); +#else + return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vupkhsh(vector pixel __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a); +#else + return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a); +#endif +} + +/* vec_vupkhsw */ + +#ifdef __POWER8_VECTOR__ +static __inline__ vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsw(__a); +#else + return __builtin_altivec_vupkhsw(__a); +#endif +} + +static __inline__ vector bool long long 
__ATTRS_o_ai +vec_vupkhsw(vector bool int __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#endif +} +#endif + +/* vec_unpackl */ + +static __inline__ vector short __ATTRS_o_ai +vec_unpackl(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsb((vector char)__a); +#else + return __builtin_altivec_vupklsb((vector char)__a); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_unpackl(vector bool char __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a); +#else + return (vector bool short)__builtin_altivec_vupklsb((vector char)__a); +#endif +} + +static __inline__ vector int __ATTRS_o_ai vec_unpackl(vector short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsh(__a); +#else + return __builtin_altivec_vupklsh(__a); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_unpackl(vector bool short __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a); +#else + return (vector bool int)__builtin_altivec_vupklsh((vector short)__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_unpackl(vector pixel __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a); +#else + return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a); +#endif +} + +#ifdef __POWER8_VECTOR__ +static __inline__ vector long long __ATTRS_o_ai vec_unpackl(vector int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsw(__a); +#else + return __builtin_altivec_vupklsw(__a); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_unpackl(vector bool int __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#endif +} + +static __inline__ vector double __ATTRS_o_ai +vec_unpackl(vector float __a) { + return (vector double)(__a[2], __a[3]); +} +#endif + +/* vec_vupklsb */ + +static __inline__ vector short __ATTRS_o_ai +vec_vupklsb(vector signed char __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsb((vector char)__a); +#else + return __builtin_altivec_vupklsb((vector char)__a); +#endif +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vupklsb(vector bool char __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a); +#else + return (vector bool short)__builtin_altivec_vupklsb((vector char)__a); +#endif +} + +/* vec_vupklsh */ + +static __inline__ vector int __ATTRS_o_ai vec_vupklsh(vector short __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsh(__a); +#else + return __builtin_altivec_vupklsh(__a); +#endif +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_vupklsh(vector bool short __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a); +#else + return (vector bool int)__builtin_altivec_vupklsh((vector short)__a); +#endif +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vupklsh(vector pixel __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a); +#else + return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a); +#endif +} + +/* vec_vupklsw */ + 
+#ifdef __POWER8_VECTOR__ +static __inline__ vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) { +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsw(__a); +#else + return __builtin_altivec_vupklsw(__a); +#endif +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vupklsw(vector bool int __a) { +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#endif +} +#endif + +/* vec_vsx_ld */ + +#ifdef __VSX__ + +static __inline__ vector bool int __ATTRS_o_ai +vec_vsx_ld(int __a, const vector bool int *__b) { + return (vector bool int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_vsx_ld(int __a, const vector signed int *__b) { + return (vector signed int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_vsx_ld(int __a, const signed int *__b) { + return (vector signed int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsx_ld(int __a, const vector unsigned int *__b) { + return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vsx_ld(int __a, const unsigned int *__b) { + return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_vsx_ld(int __a, const vector float *__b) { + return (vector float)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a, + const float *__b) { + return (vector float)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vsx_ld(int __a, const vector signed long long *__b) { + return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vsx_ld(int __a, const vector unsigned long long *__b) { + return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_vsx_ld(int __a, const vector double *__b) { + return (vector double)__builtin_vsx_lxvd2x(__a, __b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_vsx_ld(int __a, const double *__b) { + return (vector double)__builtin_vsx_lxvd2x(__a, __b); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vsx_ld(int __a, const vector bool short *__b) { + return (vector bool short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_vsx_ld(int __a, const vector signed short *__b) { + return (vector signed short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_vsx_ld(int __a, const signed short *__b) { + return (vector signed short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsx_ld(int __a, const vector unsigned short *__b) { + return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vsx_ld(int __a, const unsigned short *__b) { + return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_vsx_ld(int __a, const vector bool char *__b) { + return (vector bool char)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsx_ld(int __a, const vector signed char *__b) { + return (vector signed 
char)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vsx_ld(int __a, const signed char *__b) { + return (vector signed char)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsx_ld(int __a, const vector unsigned char *__b) { + return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vsx_ld(int __a, const unsigned char *__b) { + return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b); +} + +#endif + +/* vec_vsx_st */ + +#ifdef __VSX__ + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b, + vector bool int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b, + signed int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b, + unsigned int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b, + vector signed int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b, + signed int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b, + vector unsigned int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b, + unsigned int *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b, + vector float *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b, + float *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed long long __a, + int __b, + vector signed long long *__c) { + __builtin_vsx_stxvd2x((vector double)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a, + int __b, + vector unsigned long long *__c) { + __builtin_vsx_stxvd2x((vector double)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b, + vector double *__c) { + __builtin_vsx_stxvd2x((vector double)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b, + double *__c) { + __builtin_vsx_stxvd2x((vector double)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b, + vector bool short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b, + signed short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b, + unsigned short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b, + vector signed short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b, + signed short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void 
__ATTRS_o_ai vec_vsx_st(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a, + int __b, unsigned short *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b, + vector bool char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b, + signed char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b, + unsigned char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b, + vector signed char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b, + signed char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a, + int __b, + vector unsigned char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a, + int __b, unsigned char *__c) { + __builtin_vsx_stxvw4x((vector int)__a, __b, __c); +} + +#endif + +/* vec_xor */ + +#define __builtin_altivec_vxor vec_xor + +static __inline__ vector signed char __ATTRS_o_ai +vec_xor(vector signed char __a, vector signed char __b) { + return __a ^ __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_xor(vector bool char __a, vector signed char __b) { + return (vector signed char)__a ^ __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_xor(vector signed char __a, vector bool char __b) { + return __a ^ (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xor(vector unsigned char __a, vector unsigned char __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xor(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a ^ __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xor(vector unsigned char __a, vector bool char __b) { + return __a ^ (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_xor(vector bool char __a, + vector bool char __b) { + return __a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a, + vector short __b) { + return __a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_xor(vector bool short __a, + vector short __b) { + return (vector short)__a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a, + vector bool short __b) { + return __a ^ (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xor(vector unsigned short __a, vector unsigned short __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xor(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a ^ __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xor(vector unsigned short __a, vector bool short __b) { + return __a ^ (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_xor(vector bool short __a, vector bool short __b) { + return __a ^ __b; +} + 
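For context only (illustrative, not part of the patched header): the vec_xor overloads for float and double that follow bit-cast to an integer vector, XOR, and cast back, since there is no native floating-point XOR. A small sketch of the same pattern in GNU C, assuming AltiVec is enabled; the function name and constant are illustrative:

#include <altivec.h>

/* Flip the sign bit of every lane by XOR-ing with 0x80000000, using the same
   cast-to-integer / operate / cast-back idiom as the float vec_xor overloads. */
static vector float negate_lanes(vector float v) {
  vector unsigned int sign = {0x80000000u, 0x80000000u,
                              0x80000000u, 0x80000000u};
  return (vector float)((vector unsigned int)v ^ sign);
}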
+static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a, + vector int __b) { + return __a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_xor(vector bool int __a, + vector int __b) { + return (vector int)__a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a, + vector bool int __b) { + return __a ^ (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_xor(vector unsigned int __a, vector unsigned int __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_xor(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a ^ __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_xor(vector unsigned int __a, vector bool int __b) { + return __a ^ (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_xor(vector bool int __a, + vector bool int __b) { + return __a ^ __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_xor(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_xor(vector signed long long __a, vector signed long long __b) { + return __a ^ __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_xor(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a ^ __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_xor(vector signed long long __a, vector bool long long __b) { + return __a ^ (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xor(vector unsigned long long __a, vector unsigned long long __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xor(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a ^ __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xor(vector unsigned long long __a, vector bool long long __b) { + return __a ^ (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_xor(vector bool long long __a, vector bool long long __b) { + return __a ^ __b; +} + +static __inline__ vector double __ATTRS_o_ai vec_xor(vector double __a, + vector double __b) { + return (vector double)((vector unsigned long long)__a ^ + (vector unsigned long long)__b); +} + +static __inline__ vector double __ATTRS_o_ai +vec_xor(vector double __a, vector bool long long __b) { + return (vector double)((vector unsigned long long)__a ^ + (vector unsigned long long)__b); +} + +static __inline__ vector double __ATTRS_o_ai vec_xor(vector bool long long __a, + vector double __b) { + return (vector double)((vector unsigned long long)__a ^ + (vector unsigned long long)__b); +} +#endif + +/* vec_vxor */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_vxor(vector signed char __a, vector signed char __b) { + return __a ^ __b; +} + +static __inline__ vector signed 
char __ATTRS_o_ai +vec_vxor(vector bool char __a, vector signed char __b) { + return (vector signed char)__a ^ __b; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vxor(vector signed char __a, vector bool char __b) { + return __a ^ (vector signed char)__b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vxor(vector unsigned char __a, vector unsigned char __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vxor(vector bool char __a, vector unsigned char __b) { + return (vector unsigned char)__a ^ __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vxor(vector unsigned char __a, vector bool char __b) { + return __a ^ (vector unsigned char)__b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a, + vector bool char __b) { + return __a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a, + vector short __b) { + return __a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vxor(vector bool short __a, + vector short __b) { + return (vector short)__a ^ __b; +} + +static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a, + vector bool short __b) { + return __a ^ (vector short)__b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vxor(vector unsigned short __a, vector unsigned short __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vxor(vector bool short __a, vector unsigned short __b) { + return (vector unsigned short)__a ^ __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_vxor(vector unsigned short __a, vector bool short __b) { + return __a ^ (vector unsigned short)__b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_vxor(vector bool short __a, vector bool short __b) { + return __a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a, + vector int __b) { + return __a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vxor(vector bool int __a, + vector int __b) { + return (vector int)__a ^ __b; +} + +static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a, + vector bool int __b) { + return __a ^ (vector int)__b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vxor(vector unsigned int __a, vector unsigned int __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vxor(vector bool int __a, vector unsigned int __b) { + return (vector unsigned int)__a ^ __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_vxor(vector unsigned int __a, vector bool int __b) { + return __a ^ (vector unsigned int)__b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a, + vector bool int __b) { + return __a ^ __b; +} + +static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vxor(vector bool int __a, + vector float __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a, + vector bool int __b) { + vector unsigned int __res = + (vector unsigned int)__a ^ (vector unsigned int)__b; + return (vector float)__res; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai 
+vec_vxor(vector signed long long __a, vector signed long long __b) { + return __a ^ __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vxor(vector bool long long __a, vector signed long long __b) { + return (vector signed long long)__a ^ __b; +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_vxor(vector signed long long __a, vector bool long long __b) { + return __a ^ (vector signed long long)__b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vxor(vector unsigned long long __a, vector unsigned long long __b) { + return __a ^ __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vxor(vector bool long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__a ^ __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_vxor(vector unsigned long long __a, vector bool long long __b) { + return __a ^ (vector unsigned long long)__b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_vxor(vector bool long long __a, vector bool long long __b) { + return __a ^ __b; +} +#endif + +/* ------------------------ extensions for CBEA ----------------------------- */ + +/* vec_extract */ + +static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a, + int __b) { + return __a[__b]; +} + +static __inline__ unsigned char __ATTRS_o_ai +vec_extract(vector unsigned char __a, int __b) { + return __a[__b]; +} + +static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a, + int __b) { + return __a[__b]; +} + +static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a, + int __b) { + return __a[__b]; +} + +static __inline__ unsigned short __ATTRS_o_ai +vec_extract(vector unsigned short __a, int __b) { + return __a[__b]; +} + +static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a, + int __b) { + return __a[__b]; +} + +static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a, + int __b) { + return __a[__b]; +} + +static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a, + int __b) { + return __a[__b]; +} + +static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a, + int __b) { + return __a[__b]; +} + +#ifdef __VSX__ +static __inline__ signed long long __ATTRS_o_ai +vec_extract(vector signed long long __a, int __b) { + return __a[__b]; +} + +static __inline__ unsigned long long __ATTRS_o_ai +vec_extract(vector unsigned long long __a, int __b) { + return __a[__b]; +} + +static __inline__ unsigned long long __ATTRS_o_ai +vec_extract(vector bool long long __a, int __b) { + return __a[__b]; +} + +static __inline__ double __ATTRS_o_ai vec_extract(vector double __a, int __b) { + return __a[__b]; +} +#endif + +static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, int __b) { + return __a[__b]; +} + +#ifdef __POWER9_VECTOR__ + +#define vec_insert4b __builtin_vsx_insertword +#define vec_extract4b __builtin_vsx_extractuword + +/* vec_extract_exp */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_extract_exp(vector float __a) { + return __builtin_vsx_xvxexpsp(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_extract_exp(vector double __a) { + return __builtin_vsx_xvxexpdp(__a); +} + +/* vec_extract_sig */ + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_extract_sig(vector float __a) { + return __builtin_vsx_xvxsigsp(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_extract_sig 
(vector double __a) { + return __builtin_vsx_xvxsigdp(__a); +} + +static __inline__ vector float __ATTRS_o_ai +vec_extract_fp32_from_shorth(vector unsigned short __a) { + vector unsigned short __b = +#ifdef __LITTLE_ENDIAN__ + __builtin_shufflevector(__a, __a, 0, -1, 1, -1, 2, -1, 3, -1); +#else + __builtin_shufflevector(__a, __a, -1, 0, -1, 1, -1, 2, -1, 3); +#endif + return __builtin_vsx_xvcvhpsp(__b); +} + +static __inline__ vector float __ATTRS_o_ai +vec_extract_fp32_from_shortl(vector unsigned short __a) { + vector unsigned short __b = +#ifdef __LITTLE_ENDIAN__ + __builtin_shufflevector(__a, __a, 4, -1, 5, -1, 6, -1, 7, -1); +#else + __builtin_shufflevector(__a, __a, -1, 4, -1, 5, -1, 6, -1, 7); +#endif + return __builtin_vsx_xvcvhpsp(__b); +} +#endif /* __POWER9_VECTOR__ */ + +/* vec_insert */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_insert(signed char __a, vector signed char __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_insert(unsigned char __a, vector unsigned char __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a, + vector bool char __b, + int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_insert(signed short __a, vector signed short __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_insert(unsigned short __a, vector unsigned short __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_insert(unsigned short __a, vector bool short __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_insert(signed int __a, vector signed int __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_insert(unsigned int __a, vector unsigned int __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a, + vector bool int __b, + int __c) { + __b[__c] = __a; + return __b; +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_insert(signed long long __a, vector signed long long __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) { + __b[__c] = __a; + return __b; +} + +static __inline__ vector bool long long __ATTRS_o_ai +vec_insert(unsigned long long __a, vector bool long long __b, int __c) { + __b[__c] = __a; + return __b; +} +static __inline__ vector double __ATTRS_o_ai vec_insert(double __a, + vector double __b, + int __c) { + __b[__c] = __a; + return __b; +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_insert(float __a, + vector float __b, + int __c) { + __b[__c] = __a; + return __b; +} + +/* vec_lvlx */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvlx(int __a, const signed char *__b) { + return vec_perm(vec_ld(__a, __b), (vector signed char)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvlx(int __a, const vector signed char *__b) { + return vec_perm(vec_ld(__a, __b), (vector signed char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvlx(int __a, const unsigned char *__b) { + return vec_perm(vec_ld(__a, __b), 
(vector unsigned char)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvlx(int __a, const vector unsigned char *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvlx(int __a, const vector bool char *__b) { + return vec_perm(vec_ld(__a, __b), (vector bool char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a, + const short *__b) { + return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a, + const vector short *__b) { + return vec_perm(vec_ld(__a, __b), (vector short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvlx(int __a, const unsigned short *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvlx(int __a, const vector unsigned short *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvlx(int __a, const vector bool short *__b) { + return vec_perm(vec_ld(__a, __b), (vector bool short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvlx(int __a, + const vector pixel *__b) { + return vec_perm(vec_ld(__a, __b), (vector pixel)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) { + return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, + const vector int *__b) { + return vec_perm(vec_ld(__a, __b), (vector int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvlx(int __a, const unsigned int *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvlx(int __a, const vector unsigned int *__b) { + return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvlx(int __a, const vector bool int *__b) { + return vec_perm(vec_ld(__a, __b), (vector bool int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a, + const float *__b) { + return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a, + const vector float *__b) { + return vec_perm(vec_ld(__a, __b), (vector float)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +/* vec_lvlxl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvlxl(int __a, const signed char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector signed char)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvlxl(int __a, const vector signed char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector signed char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvlxl(int __a, const unsigned char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0), + vec_lvsl(__a, __b)); +} + 
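
As an aside, a brief usage sketch may help readers of this part of the header: the vec_lvlx/vec_lvlxl overloads above (and the vec_lvrx/vec_lvrxl ones that follow) synthesize the CBEA left/right indexed loads out of vec_ld/vec_ldl, vec_lvsl and vec_perm, and the two halves are conventionally OR-ed together to read 16 bytes from an arbitrarily aligned address. The helper name load_unaligned below is hypothetical; the snippet is not part of the vendored header and merely assumes <altivec.h> on an AltiVec-enabled PowerPC target.

    #include <altivec.h>

    /* Hypothetical helper, for illustration only: combine the left-indexed
     * load at offset 0 with the right-indexed load at offset 16 to obtain
     * the 16 bytes starting at p, whether or not p is 16-byte aligned.
     * Usual caveat of this idiom: it may read the quadword after the data. */
    static vector unsigned char load_unaligned(const unsigned char *p) {
      return vec_or(vec_lvlx(0, p), vec_lvrx(16, p));
    }
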
+static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvlxl(int __a, const vector unsigned char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvlxl(int __a, const vector bool char *__b) { + return vec_perm(vec_ldl(__a, __b), (vector bool char)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a, + const short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a, + const vector short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvlxl(int __a, const unsigned short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvlxl(int __a, const vector unsigned short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvlxl(int __a, const vector bool short *__b) { + return vec_perm(vec_ldl(__a, __b), (vector bool short)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvlxl(int __a, + const vector pixel *__b) { + return vec_perm(vec_ldl(__a, __b), (vector pixel)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, + const vector int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvlxl(int __a, const unsigned int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvlxl(int __a, const vector unsigned int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvlxl(int __a, const vector bool int *__b) { + return vec_perm(vec_ldl(__a, __b), (vector bool int)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a, + const float *__b) { + return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a, + vector float *__b) { + return vec_perm(vec_ldl(__a, __b), (vector float)(0), + vec_lvsl(__a, (unsigned char *)__b)); +} + +/* vec_lvrx */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvrx(int __a, const signed char *__b) { + return vec_perm((vector signed char)(0), vec_ld(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvrx(int __a, const vector signed char *__b) { + return vec_perm((vector signed char)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvrx(int __a, const unsigned char *__b) { + return vec_perm((vector unsigned char)(0), vec_ld(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned char 
__ATTRS_o_ai +vec_lvrx(int __a, const vector unsigned char *__b) { + return vec_perm((vector unsigned char)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvrx(int __a, const vector bool char *__b) { + return vec_perm((vector bool char)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a, + const short *__b) { + return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a, + const vector short *__b) { + return vec_perm((vector short)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvrx(int __a, const unsigned short *__b) { + return vec_perm((vector unsigned short)(0), vec_ld(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvrx(int __a, const vector unsigned short *__b) { + return vec_perm((vector unsigned short)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvrx(int __a, const vector bool short *__b) { + return vec_perm((vector bool short)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvrx(int __a, + const vector pixel *__b) { + return vec_perm((vector pixel)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) { + return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, + const vector int *__b) { + return vec_perm((vector int)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvrx(int __a, const unsigned int *__b) { + return vec_perm((vector unsigned int)(0), vec_ld(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvrx(int __a, const vector unsigned int *__b) { + return vec_perm((vector unsigned int)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvrx(int __a, const vector bool int *__b) { + return vec_perm((vector bool int)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a, + const float *__b) { + return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a, + const vector float *__b) { + return vec_perm((vector float)(0), vec_ld(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +/* vec_lvrxl */ + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvrxl(int __a, const signed char *__b) { + return vec_perm((vector signed char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_lvrxl(int __a, const vector signed char *__b) { + return vec_perm((vector signed char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvrxl(int __a, const unsigned char *__b) { + return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_lvrxl(int __a, const vector unsigned 
char *__b) { + return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool char __ATTRS_o_ai +vec_lvrxl(int __a, const vector bool char *__b) { + return vec_perm((vector bool char)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a, + const short *__b) { + return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a, + const vector short *__b) { + return vec_perm((vector short)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvrxl(int __a, const unsigned short *__b) { + return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_lvrxl(int __a, const vector unsigned short *__b) { + return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_lvrxl(int __a, const vector bool short *__b) { + return vec_perm((vector bool short)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector pixel __ATTRS_o_ai vec_lvrxl(int __a, + const vector pixel *__b) { + return vec_perm((vector pixel)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) { + return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, + const vector int *__b) { + return vec_perm((vector int)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvrxl(int __a, const unsigned int *__b) { + return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b), + vec_lvsl(__a, __b)); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_lvrxl(int __a, const vector unsigned int *__b) { + return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_lvrxl(int __a, const vector bool int *__b) { + return vec_perm((vector bool int)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a, + const float *__b) { + return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b)); +} + +static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a, + const vector float *__b) { + return vec_perm((vector float)(0), vec_ldl(__a, __b), + vec_lvsl(__a, (unsigned char *)__b)); +} + +/* vec_stvlx */ + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b, + signed char *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b, + vector signed char *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b, + unsigned char *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b, + vector unsigned char 
*__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b, + vector bool char *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b, + short *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b, + vector short *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, + int __b, unsigned short *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b, + vector bool short *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b, + vector pixel *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, + int *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b, + vector int *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b, + unsigned int *__c) { + return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b, + vector unsigned int *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b, + vector bool int *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlx(vector float __a, int __b, + vector float *__c) { + return vec_st( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +/* vec_stvlxl */ + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b, + signed char *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b, + vector signed char *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, + int __b, unsigned char *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a, + int __b, + vector unsigned char *__c) { + return vec_stl( + 
vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b, + vector bool char *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b, + short *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b, + vector short *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, + int __b, unsigned short *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b, + vector bool short *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b, + vector pixel *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, + int *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b, + vector int *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b, + unsigned int *__c) { + return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b, + vector unsigned int *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b, + vector bool int *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b, + vector float *__c) { + return vec_stl( + vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +/* vec_stvrx */ + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b, + signed char *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b, + vector signed char *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b, + unsigned char *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b, + vector unsigned char *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, 
__c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b, + vector bool char *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b, + short *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b, + vector short *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, + int __b, unsigned short *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b, + vector bool short *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b, + vector pixel *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, + int *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b, + vector int *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b, + unsigned int *__c) { + return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b, + vector unsigned int *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b, + vector bool int *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrx(vector float __a, int __b, + vector float *__c) { + return vec_st( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +/* vec_stvrxl */ + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b, + signed char *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b, + vector signed char *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, + int __b, unsigned char *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a, + int __b, + vector unsigned char *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + 
__b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b, + vector bool char *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b, + short *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b, + vector short *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, + int __b, unsigned short *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a, + int __b, + vector unsigned short *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b, + vector bool short *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b, + vector pixel *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, + int *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b, + vector int *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b, + unsigned int *__c) { + return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b, + __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b, + vector unsigned int *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b, + vector bool int *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b, + vector float *__c) { + return vec_stl( + vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)), + __b, __c); +} + +/* vec_promote */ + +static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a, + int __b) { + vector signed char __res = (vector signed char)(0); + __res[__b] = __a; + return __res; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_promote(unsigned char __a, int __b) { + vector unsigned char __res = (vector unsigned char)(0); + __res[__b] = __a; + return __res; +} + +static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) { + vector short __res = (vector short)(0); + __res[__b] = __a; + return __res; +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_promote(unsigned short __a, int __b) { + vector unsigned short __res = (vector unsigned short)(0); + __res[__b] = __a; + return __res; +} + +static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) { + vector int __res = 
(vector int)(0); + __res[__b] = __a; + return __res; +} + +static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a, + int __b) { + vector unsigned int __res = (vector unsigned int)(0); + __res[__b] = __a; + return __res; +} + +static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) { + vector float __res = (vector float)(0); + __res[__b] = __a; + return __res; +} + +/* vec_splats */ + +static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) { + return (vector signed char)(__a); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_splats(unsigned char __a) { + return (vector unsigned char)(__a); +} + +static __inline__ vector short __ATTRS_o_ai vec_splats(short __a) { + return (vector short)(__a); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_splats(unsigned short __a) { + return (vector unsigned short)(__a); +} + +static __inline__ vector int __ATTRS_o_ai vec_splats(int __a) { + return (vector int)(__a); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_splats(unsigned int __a) { + return (vector unsigned int)(__a); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_splats(signed long long __a) { + return (vector signed long long)(__a); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_splats(unsigned long long __a) { + return (vector unsigned long long)(__a); +} + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_splats(signed __int128 __a) { + return (vector signed __int128)(__a); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_splats(unsigned __int128 __a) { + return (vector unsigned __int128)(__a); +} + +#endif + +static __inline__ vector double __ATTRS_o_ai vec_splats(double __a) { + return (vector double)(__a); +} +#endif + +static __inline__ vector float __ATTRS_o_ai vec_splats(float __a) { + return (vector float)(__a); +} + +/* ----------------------------- predicates --------------------------------- */ + +/* vec_all_eq */ + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a, + vector short __b) { + return 
__builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector pixel __a, + vector pixel __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, + (vector int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a, + vector long long __b) { + return 
__builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_eq(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b); +} +#endif + +/* vec_all_ge */ + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int 
__ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a); +} +static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_ge(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b); +} +#endif + +/* vec_all_gt */ + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a, + vector unsigned char __b) { + return 
__builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b); +} 
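
Since the predicate overloads read more naturally next to a concrete call site, here is a minimal usage sketch: each vec_all_* overload collapses a whole-vector comparison into a plain int by testing the CR6 field produced by the corresponding vcmp*_p builtin, so the result can drive ordinary control flow. The function name strictly_greater is hypothetical; the snippet is not part of the vendored header and only assumes <altivec.h> on an AltiVec-enabled target.

    #include <altivec.h>

    /* Hypothetical helper, for illustration only: vec_all_gt returns 1 only
     * when every element of a is greater than the matching element of b. */
    static int strictly_greater(vector signed int a, vector signed int b) {
      return vec_all_gt(a, b);
    }
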
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_gt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b); +#else + return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b); +} +#endif + +/* vec_all_in */ + +static __inline__ int __attribute__((__always_inline__)) +vec_all_in(vector float __a, vector float __b) { + return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b); +} + +/* vec_all_le */ + +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a, + 
vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a, + (vector unsigned long long)__b); +} 
+#endif + +static __inline__ int __ATTRS_o_ai vec_all_le(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a); +} +#endif + +/* vec_all_lt */ + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned 
int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_lt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a); +#else + return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a); +} +#endif + +/* vec_all_nan */ + +static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_nan(vector double __a) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a); +} +#endif + +/* vec_all_ne */ + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static 
__inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector pixel __a, + vector pixel __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, + (vector int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a, 
+ vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b); +} +#endif + +/* vec_all_nge */ + +static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_nge(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b); +} +#endif + +/* vec_all_ngt */ + +static __inline__ int __ATTRS_o_ai vec_all_ngt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b); +#else + return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b); +} +#endif + +/* vec_all_nle */ + +static __inline__ int __attribute__((__always_inline__)) +vec_all_nle(vector float __a, vector float __b) { + return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a); +} + +/* vec_all_nlt */ + +static __inline__ int __attribute__((__always_inline__)) +vec_all_nlt(vector float __a, vector float __b) { + return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a); +} + +/* vec_all_numeric */ + +static __inline__ int __attribute__((__always_inline__)) +vec_all_numeric(vector float __a) { + return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a); +} + +/* vec_any_eq */ + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a, + 
vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector pixel __a, + vector pixel __b) { + return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a, + vector 
unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, + (vector int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_any_eq(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b); +} +#endif + +/* vec_any_ge */ + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a, + vector bool char __b) { + return 
__builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, + (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, + (vector signed long long)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a, + 
vector signed long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_any_ge(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b); +} +#endif + +/* vec_any_gt */ + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, + (vector signed char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a, + vector bool 
short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, + (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_any_gt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b); +#else + return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b); +} +#endif + +/* vec_any_le */ + +static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a, + 
vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, + (vector signed char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a, + (vector unsigned short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, + (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a, + __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a, + vector bool int __b) { + 
return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a, + (vector unsigned int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_any_le(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a); +#else + return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a); +} +#endif + +/* vec_any_lt */ + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b, + (vector unsigned char)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai 
vec_any_lt(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a, + vector bool short __b) { + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b, + (vector unsigned short)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b, + __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, + (vector unsigned int)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b, + (vector unsigned int)__a); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, + (vector signed long long)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__b, __a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a, + vector unsigned long long __b) { + return 
__builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, + (vector unsigned long long)__a); +} + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_any_lt(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a); +#else + return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a); +} +#endif + +/* vec_any_nan */ + +static __inline__ int __attribute__((__always_inline__)) +vec_any_nan(vector float __a) { + return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a); +} + +/* vec_any_ne */ + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a, + vector signed char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a, + vector unsigned char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a, + vector bool char __b) { + return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a, + (vector char)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a, + vector bool short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a, + vector short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a, + vector unsigned short __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a, + vector bool short __b) { + return 
__builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector pixel __a, + vector pixel __b) { + return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a, + (vector short)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a, + vector int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a, + vector unsigned int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, + (vector int)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a, + vector bool int __b) { + return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, + (vector int)__b); +} + +#ifdef __POWER8_VECTOR__ +static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a, + (vector long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, + (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a, + vector signed long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a, + vector unsigned long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b); +} + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a, + vector bool long long __b) { + return __builtin_altivec_vcmpequd_p( + __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b); +} +#endif + +static __inline__ int __ATTRS_o_ai vec_any_ne(vector float __a, + vector float __b) { +#ifdef __VSX__ + return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b); +#else + return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b); +#endif +} + +#ifdef __VSX__ +static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a, + vector double __b) { + return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b); +} +#endif + +/* vec_any_nge */ + +static __inline__ int 
__attribute__((__always_inline__)) +vec_any_nge(vector float __a, vector float __b) { + return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b); +} + +/* vec_any_ngt */ + +static __inline__ int __attribute__((__always_inline__)) +vec_any_ngt(vector float __a, vector float __b) { + return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b); +} + +/* vec_any_nle */ + +static __inline__ int __attribute__((__always_inline__)) +vec_any_nle(vector float __a, vector float __b) { + return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a); +} + +/* vec_any_nlt */ + +static __inline__ int __attribute__((__always_inline__)) +vec_any_nlt(vector float __a, vector float __b) { + return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a); +} + +/* vec_any_numeric */ + +static __inline__ int __attribute__((__always_inline__)) +vec_any_numeric(vector float __a) { + return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a); +} + +/* vec_any_out */ + +static __inline__ int __attribute__((__always_inline__)) +vec_any_out(vector float __a, vector float __b) { + return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b); +} + +/* Power 8 Crypto functions +Note: We diverge from the current GCC implementation with regard +to cryptography and related functions as follows: +- Only the SHA and AES instructions and builtins are disabled by -mno-crypto +- The remaining ones are only available on Power8 and up so + require -mpower8-vector +The justification for this is that export requirements require that +Category:Vector.Crypto is optional (i.e. compliant hardware may not provide +support). As a result, we need to be able to turn off support for those. +The remaining ones (currently controlled by -mcrypto for GCC) still +need to be provided on compliant hardware even if Vector.Crypto is not +provided. 
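+(Illustrative sketch only, not part of the upstream comment: one way user
+code might guard the two groups independently is
+
+  #ifdef __CRYPTO__
+  __r = vec_sbox_be(__state);   // AES/SHA group; Vector.Crypto is optional
+  #endif
+  #ifdef __POWER8_VECTOR__
+  __p = vec_pmsum_be(__x, __y); // not gated by -mcrypto
+  #endif
+
+where __r, __p, __state, __x and __y stand for hypothetical
+vector unsigned long long values.)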
+*/ +#ifdef __CRYPTO__ +#define vec_sbox_be __builtin_altivec_crypto_vsbox +#define vec_cipher_be __builtin_altivec_crypto_vcipher +#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast +#define vec_ncipher_be __builtin_altivec_crypto_vncipher +#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast + +static __inline__ vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vsbox(vector unsigned long long __a) { + return __builtin_altivec_crypto_vsbox(__a); +} + +static __inline__ vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vcipher(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_crypto_vcipher(__a, __b); +} + +static __inline__ vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vcipherlast(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_crypto_vcipherlast(__a, __b); +} + +static __inline__ vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vncipher(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_crypto_vncipher(__a, __b); +} + +static __inline__ vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vncipherlast(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_crypto_vncipherlast(__a, __b); +} + +#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad +#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw + +#define vec_shasigma_be(X, Y, Z) \ + _Generic((X), vector unsigned int \ + : __builtin_crypto_vshasigmaw, vector unsigned long long \ + : __builtin_crypto_vshasigmad)((X), (Y), (Z)) +#endif + +#ifdef __POWER8_VECTOR__ +static __inline__ vector bool char __ATTRS_o_ai +vec_permxor(vector bool char __a, vector bool char __b, + vector bool char __c) { + return __builtin_altivec_crypto_vpermxor(__a, __b, __c); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_permxor(vector signed char __a, vector signed char __b, + vector signed char __c) { + return __builtin_altivec_crypto_vpermxor(__a, __b, __c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_permxor(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_altivec_crypto_vpermxor(__a, __b, __c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_altivec_crypto_vpermxor(__a, __b, __c); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return (vector unsigned short)__builtin_altivec_crypto_vpermxor( + (vector unsigned char)__a, (vector unsigned char)__b, + (vector unsigned char)__c); +} + +static __inline__ vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor( + vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) { + return (vector unsigned int)__builtin_altivec_crypto_vpermxor( + (vector unsigned char)__a, (vector unsigned char)__b, + (vector unsigned char)__c); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +__builtin_crypto_vpermxor(vector unsigned long long __a, + vector unsigned long long __b, + vector unsigned long long __c) { + return (vector unsigned long long)__builtin_altivec_crypto_vpermxor( + 
(vector unsigned char)__a, (vector unsigned char)__b, + (vector unsigned char)__c); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_crypto_vpmsumb(__a, __b); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) { + return __builtin_altivec_crypto_vpmsumh(__a, __b); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) { + return __builtin_altivec_crypto_vpmsumw(__a, __b); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +__builtin_crypto_vpmsumb(vector unsigned long long __a, + vector unsigned long long __b) { + return __builtin_altivec_crypto_vpmsumd(__a, __b); +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_vgbbd(vector signed char __a) { + return __builtin_altivec_vgbbd((vector unsigned char)__a); +} + +#define vec_pmsum_be __builtin_crypto_vpmsumb +#define vec_gb __builtin_altivec_vgbbd + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_vgbbd(vector unsigned char __a) { + return __builtin_altivec_vgbbd(__a); +} + +static __inline__ vector long long __ATTRS_o_ai +vec_vbpermq(vector signed char __a, vector signed char __b) { + return __builtin_altivec_vbpermq((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static __inline__ vector long long __ATTRS_o_ai +vec_vbpermq(vector unsigned char __a, vector unsigned char __b) { + return __builtin_altivec_vbpermq(__a, __b); +} + +#ifdef __powerpc64__ +static __inline__ vector unsigned long long __attribute__((__always_inline__)) +vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) { + return __builtin_altivec_vbpermq((vector unsigned char)__a, + (vector unsigned char)__b); +} +#endif +#endif + + +/* vec_reve */ + +static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) { + return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) { + return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_reve(vector unsigned char __a) { + return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) { + return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) { + return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_reve(vector unsigned int __a) { + return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) { + return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector signed short +vec_reve(vector signed short __a) { + return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_reve(vector unsigned short __a) { + return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0); +} + +static inline __ATTRS_o_ai vector float vec_reve(vector float __a) { + return __builtin_shufflevector(__a, __a, 3, 2, 1, 0); +} + +#ifdef __VSX__ +static inline 
__ATTRS_o_ai vector bool long long +vec_reve(vector bool long long __a) { + return __builtin_shufflevector(__a, __a, 1, 0); +} + +static inline __ATTRS_o_ai vector signed long long +vec_reve(vector signed long long __a) { + return __builtin_shufflevector(__a, __a, 1, 0); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_reve(vector unsigned long long __a) { + return __builtin_shufflevector(__a, __a, 1, 0); +} + +static inline __ATTRS_o_ai vector double vec_reve(vector double __a) { + return __builtin_shufflevector(__a, __a, 1, 0); +} +#endif + +/* vec_revb */ +static __inline__ vector bool char __ATTRS_o_ai +vec_revb(vector bool char __a) { + return __a; +} + +static __inline__ vector signed char __ATTRS_o_ai +vec_revb(vector signed char __a) { + return __a; +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_revb(vector unsigned char __a) { + return __a; +} + +static __inline__ vector bool short __ATTRS_o_ai +vec_revb(vector bool short __a) { + vector unsigned char __indices = + { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_revb(vector signed short __a) { + vector unsigned char __indices = + { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_revb(vector unsigned short __a) { + vector unsigned char __indices = + { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector bool int __ATTRS_o_ai +vec_revb(vector bool int __a) { + vector unsigned char __indices = + { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_revb(vector signed int __a) { + vector unsigned char __indices = + { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_revb(vector unsigned int __a) { + vector unsigned char __indices = + { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector float __ATTRS_o_ai +vec_revb(vector float __a) { + vector unsigned char __indices = + { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }; + return vec_perm(__a, __a, __indices); +} + +#ifdef __VSX__ +static __inline__ vector bool long long __ATTRS_o_ai +vec_revb(vector bool long long __a) { + vector unsigned char __indices = + { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector signed long long __ATTRS_o_ai +vec_revb(vector signed long long __a) { + vector unsigned char __indices = + { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_revb(vector unsigned long long __a) { + vector unsigned char __indices = + { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }; + return vec_perm(__a, __a, __indices); +} + +static __inline__ vector double __ATTRS_o_ai +vec_revb(vector double __a) { + vector unsigned char __indices = + { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 }; + return vec_perm(__a, __a, __indices); +} +#endif /* End __VSX__ */ + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed __int128 
__ATTRS_o_ai +vec_revb(vector signed __int128 __a) { + vector unsigned char __indices = + { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }; + return (vector signed __int128)vec_perm((vector signed int)__a, + (vector signed int)__a, + __indices); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_revb(vector unsigned __int128 __a) { + vector unsigned char __indices = + { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }; + return (vector unsigned __int128)vec_perm((vector signed int)__a, + (vector signed int)__a, + __indices); +} +#endif /* END __POWER8_VECTOR__ && __powerpc64__ */ + +/* vec_xl */ + +static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset, + signed char *__ptr) { + return *(vector signed char *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_xl(signed long long __offset, unsigned char *__ptr) { + return *(vector unsigned char *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset, + signed short *__ptr) { + return *(vector signed short *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_xl(signed long long __offset, unsigned short *__ptr) { + return *(vector unsigned short *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset, + signed int *__ptr) { + return *(vector signed int *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset, + unsigned int *__ptr) { + return *(vector unsigned int *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset, + float *__ptr) { + return *(vector float *)(__ptr + __offset); +} + +#ifdef __VSX__ +static inline __ATTRS_o_ai vector signed long long +vec_xl(signed long long __offset, signed long long *__ptr) { + return *(vector signed long long *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_xl(signed long long __offset, unsigned long long *__ptr) { + return *(vector unsigned long long *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset, + double *__ptr) { + return *(vector double *)(__ptr + __offset); +} +#endif + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static inline __ATTRS_o_ai vector signed __int128 +vec_xl(signed long long __offset, signed __int128 *__ptr) { + return *(vector signed __int128 *)(__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned __int128 +vec_xl(signed long long __offset, unsigned __int128 *__ptr) { + return *(vector unsigned __int128 *)(__ptr + __offset); +} +#endif + +/* vec_xl_be */ + +#ifdef __LITTLE_ENDIAN__ +static __inline__ vector signed char __ATTRS_o_ai +vec_xl_be(signed long long __offset, signed char *__ptr) { + vector signed char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr); + return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, + 13, 12, 11, 10, 9, 8); +} + +static __inline__ vector unsigned char __ATTRS_o_ai +vec_xl_be(signed long long __offset, unsigned char *__ptr) { + vector unsigned char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr); + return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, + 13, 12, 11, 10, 9, 8); +} + +static __inline__ vector signed short __ATTRS_o_ai +vec_xl_be(signed long long __offset, signed short *__ptr) { + vector signed short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr); + return 
__builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4); +} + +static __inline__ vector unsigned short __ATTRS_o_ai +vec_xl_be(signed long long __offset, unsigned short *__ptr) { + vector unsigned short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr); + return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4); +} + +static __inline__ vector signed int __ATTRS_o_ai +vec_xl_be(signed long long __offset, signed int *__ptr) { + return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr); +} + +static __inline__ vector unsigned int __ATTRS_o_ai +vec_xl_be(signed long long __offset, unsigned int *__ptr) { + return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr); +} + +static __inline__ vector float __ATTRS_o_ai +vec_xl_be(signed long long __offset, float *__ptr) { + return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr); +} + +#ifdef __VSX__ +static __inline__ vector signed long long __ATTRS_o_ai +vec_xl_be(signed long long __offset, signed long long *__ptr) { + return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr); +} + +static __inline__ vector unsigned long long __ATTRS_o_ai +vec_xl_be(signed long long __offset, unsigned long long *__ptr) { + return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr); +} + +static __inline__ vector double __ATTRS_o_ai +vec_xl_be(signed long long __offset, double *__ptr) { + return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr); +} +#endif + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ vector signed __int128 __ATTRS_o_ai +vec_xl_be(signed long long __offset, signed __int128 *__ptr) { + return vec_xl(__offset, __ptr); +} + +static __inline__ vector unsigned __int128 __ATTRS_o_ai +vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) { + return vec_xl(__offset, __ptr); +} +#endif +#else + #define vec_xl_be vec_xl +#endif + +/* vec_xst */ + +static inline __ATTRS_o_ai void vec_xst(vector signed char __vec, + signed long long __offset, + signed char *__ptr) { + *(vector signed char *)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec, + signed long long __offset, + unsigned char *__ptr) { + *(vector unsigned char *)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector signed short __vec, + signed long long __offset, + signed short *__ptr) { + *(vector signed short *)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec, + signed long long __offset, + unsigned short *__ptr) { + *(vector unsigned short *)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector signed int __vec, + signed long long __offset, + signed int *__ptr) { + *(vector signed int *)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec, + signed long long __offset, + unsigned int *__ptr) { + *(vector unsigned int *)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector float __vec, + signed long long __offset, + float *__ptr) { + *(vector float *)(__ptr + __offset) = __vec; +} + +#ifdef __VSX__ +static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec, + signed long long __offset, + signed long long *__ptr) { + *(vector signed long long *)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec, + signed long long __offset, + unsigned long long *__ptr) { + *(vector unsigned long long 
*)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector double __vec, + signed long long __offset, + double *__ptr) { + *(vector double *)(__ptr + __offset) = __vec; +} +#endif + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec, + signed long long __offset, + signed __int128 *__ptr) { + *(vector signed __int128 *)(__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec, + signed long long __offset, + unsigned __int128 *__ptr) { + *(vector unsigned __int128 *)(__ptr + __offset) = __vec; +} +#endif + +/* vec_xst_be */ + +#ifdef __LITTLE_ENDIAN__ +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed char __vec, + signed long long __offset, + signed char *__ptr) { + vector signed char __tmp = + __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, + 13, 12, 11, 10, 9, 8); + __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec, + signed long long __offset, + unsigned char *__ptr) { + vector unsigned char __tmp = + __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, + 13, 12, 11, 10, 9, 8); + __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec, + signed long long __offset, + signed short *__ptr) { + vector signed short __tmp = + __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4); + __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec, + signed long long __offset, + unsigned short *__ptr) { + vector unsigned short __tmp = + __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4); + __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec, + signed long long __offset, + signed int *__ptr) { + __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec, + signed long long __offset, + unsigned int *__ptr) { + __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec, + signed long long __offset, + float *__ptr) { + __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr); +} + +#ifdef __VSX__ +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec, + signed long long __offset, + signed long long *__ptr) { + __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec, + signed long long __offset, + unsigned long long *__ptr) { + __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec, + signed long long __offset, + double *__ptr) { + __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr); +} +#endif + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed __int128 __vec, + signed long long __offset, + signed __int128 *__ptr) { + vec_xst(__vec, __offset, __ptr); +} + +static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned __int128 __vec, + signed long long __offset, + unsigned __int128 *__ptr) { + vec_xst(__vec, __offset, __ptr); +} +#endif +#else + #define vec_xst_be vec_xst +#endif + +#ifdef __POWER9_VECTOR__ +#define vec_test_data_class(__a, 
__b) \ + _Generic((__a), \ + vector float: \ + (vector bool int)__builtin_vsx_xvtstdcsp((__a), (__b)), \ + vector double: \ + (vector bool long long)__builtin_vsx_xvtstdcdp((__a), (__b)) \ + ) + +#endif /* #ifdef __POWER9_VECTOR__ */ + +static vector float __ATTRS_o_ai vec_neg(vector float __a) { + return -__a; +} + +#ifdef __VSX__ +static vector double __ATTRS_o_ai vec_neg(vector double __a) { + return -__a; +} + +#endif + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static vector long long __ATTRS_o_ai vec_neg(vector long long __a) { + return -__a; +} +#endif + +static vector signed int __ATTRS_o_ai vec_neg(vector signed int __a) { + return -__a; +} + +static vector signed short __ATTRS_o_ai vec_neg(vector signed short __a) { + return -__a; +} + +static vector signed char __ATTRS_o_ai vec_neg(vector signed char __a) { + return -__a; +} + +static vector float __ATTRS_o_ai vec_nabs(vector float __a) { + return - vec_abs(__a); +} + +#ifdef __VSX__ +static vector double __ATTRS_o_ai vec_nabs(vector double __a) { + return - vec_abs(__a); +} + +#endif + +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) { + return __builtin_altivec_vminsd(__a, -__a); +} +#endif + +static vector signed int __ATTRS_o_ai vec_nabs(vector signed int __a) { + return __builtin_altivec_vminsw(__a, -__a); +} + +static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) { + return __builtin_altivec_vminsh(__a, -__a); +} + +static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) { + return __builtin_altivec_vminsb(__a, -__a); +} +#undef __ATTRS_o_ai + +#endif /* __ALTIVEC_H */ diff --git a/c_headers/ammintrin.h b/c_headers/ammintrin.h index 4d0e770ff..2843a7a26 100644 --- a/c_headers/ammintrin.h +++ b/c_headers/ammintrin.h @@ -24,27 +24,21 @@ #ifndef __AMMINTRIN_H #define __AMMINTRIN_H -#ifndef __SSE4A__ -#error "SSE4A instruction set not enabled" -#else - #include /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a"))) /// \brief Extracts the specified bits from the lower 64 bits of the 128-bit -/// integer vector operand at the index idx and of the length len. +/// integer vector operand at the index \a idx and of the length \a len. /// /// \headerfile /// -/// \code +/// \code /// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx); -/// \endcode +/// \endcode /// -/// \code -/// This intrinsic corresponds to the \c EXTRQ instruction. -/// \endcode +/// This intrinsic corresponds to the EXTRQ instruction. /// /// \param x /// The value from which bits are extracted. @@ -52,11 +46,11 @@ /// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0] /// are zero, the length is interpreted as 64. /// \param idx -/// Bits [5:0] specify the index of the least significant bit; the other -/// bits are ignored. If the sum of the index and length is greater than -/// 64, the result is undefined. If the length and index are both zero, -/// bits [63:0] of parameter x are extracted. If the length is zero -/// but the index is non-zero, the result is undefined. +/// Bits [5:0] specify the index of the least significant bit; the other +/// bits are ignored. If the sum of the index and length is greater than 64, +/// the result is undefined. 
If the length and index are both zero, bits +/// [63:0] of parameter \a x are extracted. If the length is zero but the +/// index is non-zero, the result is undefined. /// \returns A 128-bit integer vector whose lower 64 bits contain the bits /// extracted from the source operand. #define _mm_extracti_si64(x, len, idx) \ @@ -64,25 +58,23 @@ (char)(len), (char)(idx))) /// \brief Extracts the specified bits from the lower 64 bits of the 128-bit -/// integer vector operand at the index and of the length specified by __y. +/// integer vector operand at the index and of the length specified by +/// \a __y. /// /// \headerfile /// -/// \code -/// This intrinsic corresponds to the \c EXTRQ instruction. -/// \endcode +/// This intrinsic corresponds to the EXTRQ instruction. /// /// \param __x /// The value from which bits are extracted. /// \param __y -/// Specifies the index of the least significant bit at [13:8] -/// and the length at [5:0]; all other bits are ignored. -/// If bits [5:0] are zero, the length is interpreted as 64. -/// If the sum of the index and length is greater than 64, the result is -/// undefined. If the length and index are both zero, bits [63:0] of -/// parameter __x are extracted. If the length is zero but the index is -/// non-zero, the result is undefined. -/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted +/// Specifies the index of the least significant bit at [13:8] and the +/// length at [5:0]; all other bits are ignored. If bits [5:0] are zero, the +/// length is interpreted as 64. If the sum of the index and length is +/// greater than 64, the result is undefined. If the length and index are +/// both zero, bits [63:0] of parameter \a __x are extracted. If the length +/// is zero but the index is non-zero, the result is undefined. +/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted /// from the source operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_extract_si64(__m128i __x, __m128i __y) @@ -90,97 +82,88 @@ _mm_extract_si64(__m128i __x, __m128i __y) return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y); } -/// \brief Inserts bits of a specified length from the source integer vector -/// y into the lower 64 bits of the destination integer vector x at the -/// index idx and of the length len. +/// \brief Inserts bits of a specified length from the source integer vector +/// \a y into the lower 64 bits of the destination integer vector \a x at +/// the index \a idx and of the length \a len. /// /// \headerfile /// -/// \code +/// \code /// __m128i _mm_inserti_si64(__m128i x, __m128i y, const int len, /// const int idx); -/// \endcode +/// \endcode /// -/// \code -/// This intrinsic corresponds to the \c INSERTQ instruction. -/// \endcode +/// This intrinsic corresponds to the INSERTQ instruction. /// /// \param x -/// The destination operand where bits will be inserted. The inserted bits -/// are defined by the length len and by the index idx specifying the least -/// significant bit. +/// The destination operand where bits will be inserted. The inserted bits +/// are defined by the length \a len and by the index \a idx specifying the +/// least significant bit. /// \param y -/// The source operand containing the bits to be extracted. The extracted -/// bits are the least significant bits of operand y of length len. +/// The source operand containing the bits to be extracted. The extracted +/// bits are the least significant bits of operand \a y of length \a len. 
/// \param len /// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0] /// are zero, the length is interpreted as 64. /// \param idx -/// Bits [5:0] specify the index of the least significant bit; the other -/// bits are ignored. If the sum of the index and length is greater than -/// 64, the result is undefined. If the length and index are both zero, -/// bits [63:0] of parameter y are inserted into parameter x. If the -/// length is zero but the index is non-zero, the result is undefined. -/// \returns A 128-bit integer vector containing the original lower 64-bits -/// of destination operand x with the specified bitfields replaced by the -/// lower bits of source operand y. The upper 64 bits of the return value +/// Bits [5:0] specify the index of the least significant bit; the other +/// bits are ignored. If the sum of the index and length is greater than 64, +/// the result is undefined. If the length and index are both zero, bits +/// [63:0] of parameter \a y are inserted into parameter \a x. If the length +/// is zero but the index is non-zero, the result is undefined. +/// \returns A 128-bit integer vector containing the original lower 64-bits of +/// destination operand \a x with the specified bitfields replaced by the +/// lower bits of source operand \a y. The upper 64 bits of the return value /// are undefined. - #define _mm_inserti_si64(x, y, len, idx) \ ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \ (__v2di)(__m128i)(y), \ (char)(len), (char)(idx))) -/// \brief Inserts bits of a specified length from the source integer vector -/// __y into the lower 64 bits of the destination integer vector __x at -/// the index and of the length specified by __y. +/// \brief Inserts bits of a specified length from the source integer vector +/// \a __y into the lower 64 bits of the destination integer vector \a __x +/// at the index and of the length specified by \a __y. /// /// \headerfile /// -/// \code -/// This intrinsic corresponds to the \c INSERTQ instruction. -/// \endcode +/// This intrinsic corresponds to the INSERTQ instruction. /// /// \param __x -/// The destination operand where bits will be inserted. The inserted bits -/// are defined by the length and by the index of the least significant bit -/// specified by operand __y. +/// The destination operand where bits will be inserted. The inserted bits +/// are defined by the length and by the index of the least significant bit +/// specified by operand \a __y. /// \param __y -/// The source operand containing the bits to be extracted. The extracted -/// bits are the least significant bits of operand __y with length specified -/// by bits [69:64]. These are inserted into the destination at the index -/// specified by bits [77:72]; all other bits are ignored. -/// If bits [69:64] are zero, the length is interpreted as 64. -/// If the sum of the index and length is greater than 64, the result is -/// undefined. If the length and index are both zero, bits [63:0] of -/// parameter __y are inserted into parameter __x. If the length -/// is zero but the index is non-zero, the result is undefined. -/// \returns A 128-bit integer vector containing the original lower 64-bits -/// of destination operand __x with the specified bitfields replaced by the -/// lower bits of source operand __y. The upper 64 bits of the return value -/// are undefined. - +/// The source operand containing the bits to be extracted. 
The extracted +/// bits are the least significant bits of operand \a __y with length +/// specified by bits [69:64]. These are inserted into the destination at the +/// index specified by bits [77:72]; all other bits are ignored. If bits +/// [69:64] are zero, the length is interpreted as 64. If the sum of the +/// index and length is greater than 64, the result is undefined. If the +/// length and index are both zero, bits [63:0] of parameter \a __y are +/// inserted into parameter \a __x. If the length is zero but the index is +/// non-zero, the result is undefined. +/// \returns A 128-bit integer vector containing the original lower 64-bits of +/// destination operand \a __x with the specified bitfields replaced by the +/// lower bits of source operand \a __y. The upper 64 bits of the return +/// value are undefined. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_insert_si64(__m128i __x, __m128i __y) { return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y); } -/// \brief Stores a 64-bit double-precision value in a 64-bit memory location. +/// \brief Stores a 64-bit double-precision value in a 64-bit memory location. /// To minimize caching, the data is flagged as non-temporal (unlikely to be /// used again soon). /// /// \headerfile /// -/// \code -/// This intrinsic corresponds to the \c MOVNTSD instruction. -/// \endcode +/// This intrinsic corresponds to the MOVNTSD instruction. /// /// \param __p /// The 64-bit memory location used to store the register value. /// \param __a -/// The 64-bit double-precision floating-point register value to -/// be stored. +/// The 64-bit double-precision floating-point register value to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_sd(double *__p, __m128d __a) { @@ -193,15 +176,12 @@ _mm_stream_sd(double *__p, __m128d __a) /// /// \headerfile /// -/// \code -/// This intrinsic corresponds to the \c MOVNTSS instruction. -/// \endcode +/// This intrinsic corresponds to the MOVNTSS instruction. /// /// \param __p /// The 32-bit memory location used to store the register value. /// \param __a -/// The 32-bit single-precision floating-point register value to -/// be stored. +/// The 32-bit single-precision floating-point register value to be stored. 
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_ss(float *__p, __m128 __a) { @@ -210,6 +190,4 @@ _mm_stream_ss(float *__p, __m128 __a) #undef __DEFAULT_FN_ATTRS -#endif /* __SSE4A__ */ - #endif /* __AMMINTRIN_H */ diff --git a/c_headers/arm_acle.h b/c_headers/arm_acle.h index 73a7e76ce..8423e62a3 100644 --- a/c_headers/arm_acle.h +++ b/c_headers/arm_acle.h @@ -72,9 +72,11 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(v /* 8.5 Swap */ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __swp(uint32_t x, volatile uint32_t *p) { +__swp(uint32_t __x, volatile uint32_t *__p) { uint32_t v; - do v = __builtin_arm_ldrex(p); while (__builtin_arm_strex(x, p)); + do + v = __builtin_arm_ldrex(__p); + while (__builtin_arm_strex(__x, __p)); return v; } @@ -110,109 +112,115 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(voi /* 9.2 Miscellaneous data-processing intrinsics */ /* ROR */ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __ror(uint32_t x, uint32_t y) { - y %= 32; - if (y == 0) return x; - return (x >> y) | (x << (32 - y)); +__ror(uint32_t __x, uint32_t __y) { + __y %= 32; + if (__y == 0) + return __x; + return (__x >> __y) | (__x << (32 - __y)); } static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) - __rorll(uint64_t x, uint32_t y) { - y %= 64; - if (y == 0) return x; - return (x >> y) | (x << (64 - y)); +__rorll(uint64_t __x, uint32_t __y) { + __y %= 64; + if (__y == 0) + return __x; + return (__x >> __y) | (__x << (64 - __y)); } static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) - __rorl(unsigned long x, uint32_t y) { +__rorl(unsigned long __x, uint32_t __y) { #if __SIZEOF_LONG__ == 4 - return __ror(x, y); + return __ror(__x, __y); #else - return __rorll(x, y); + return __rorll(__x, __y); #endif } /* CLZ */ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __clz(uint32_t t) { - return __builtin_clz(t); +__clz(uint32_t __t) { + return __builtin_clz(__t); } static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) - __clzl(unsigned long t) { - return __builtin_clzl(t); +__clzl(unsigned long __t) { + return __builtin_clzl(__t); } static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) - __clzll(uint64_t t) { - return __builtin_clzll(t); +__clzll(uint64_t __t) { + return __builtin_clzll(__t); } /* REV */ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __rev(uint32_t t) { - return __builtin_bswap32(t); +__rev(uint32_t __t) { + return __builtin_bswap32(__t); } static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) - __revl(unsigned long t) { +__revl(unsigned long __t) { #if __SIZEOF_LONG__ == 4 - return __builtin_bswap32(t); + return __builtin_bswap32(__t); #else - return __builtin_bswap64(t); + return __builtin_bswap64(__t); #endif } static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) - __revll(uint64_t t) { - return __builtin_bswap64(t); +__revll(uint64_t __t) { + return __builtin_bswap64(__t); } /* REV16 */ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __rev16(uint32_t t) { - return __ror(__rev(t), 16); -} - -static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) - __rev16l(unsigned long t) { - return __rorl(__revl(t), sizeof(long) / 2); +__rev16(uint32_t __t) { + return __ror(__rev(__t), 16); } static __inline__ uint64_t 
__attribute__((__always_inline__, __nodebug__)) - __rev16ll(uint64_t t) { - return __rorll(__revll(t), 32); +__rev16ll(uint64_t __t) { + return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t); +} + +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) +__rev16l(unsigned long __t) { +#if __SIZEOF_LONG__ == 4 + return __rev16(__t); +#else + return __rev16ll(__t); +#endif } /* REVSH */ static __inline__ int16_t __attribute__((__always_inline__, __nodebug__)) - __revsh(int16_t t) { - return __builtin_bswap16(t); +__revsh(int16_t __t) { + return __builtin_bswap16(__t); } /* RBIT */ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __rbit(uint32_t t) { - return __builtin_arm_rbit(t); +__rbit(uint32_t __t) { + return __builtin_arm_rbit(__t); } static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) - __rbitll(uint64_t t) { +__rbitll(uint64_t __t) { #if __ARM_32BIT_STATE - return (((uint64_t) __builtin_arm_rbit(t)) << 32) | - __builtin_arm_rbit(t >> 32); + return (((uint64_t)__builtin_arm_rbit(__t)) << 32) | + __builtin_arm_rbit(__t >> 32); #else - return __builtin_arm_rbit64(t); + return __builtin_arm_rbit64(__t); #endif } static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) - __rbitl(unsigned long t) { +__rbitl(unsigned long __t) { #if __SIZEOF_LONG__ == 4 - return __rbit(t); + return __rbit(__t); #else - return __rbitll(t); + return __rbitll(__t); #endif } @@ -231,61 +239,61 @@ static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) /* 9.4.2 Saturating addition and subtraction intrinsics */ #if __ARM_32BIT_STATE static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) - __qadd(int32_t t, int32_t v) { - return __builtin_arm_qadd(t, v); +__qadd(int32_t __t, int32_t __v) { + return __builtin_arm_qadd(__t, __v); } static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) - __qsub(int32_t t, int32_t v) { - return __builtin_arm_qsub(t, v); +__qsub(int32_t __t, int32_t __v) { + return __builtin_arm_qsub(__t, __v); } static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) -__qdbl(int32_t t) { - return __builtin_arm_qadd(t, t); +__qdbl(int32_t __t) { + return __builtin_arm_qadd(__t, __t); } #endif /* 9.7 CRC32 intrinsics */ #if __ARM_FEATURE_CRC32 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __crc32b(uint32_t a, uint8_t b) { - return __builtin_arm_crc32b(a, b); +__crc32b(uint32_t __a, uint8_t __b) { + return __builtin_arm_crc32b(__a, __b); } static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __crc32h(uint32_t a, uint16_t b) { - return __builtin_arm_crc32h(a, b); +__crc32h(uint32_t __a, uint16_t __b) { + return __builtin_arm_crc32h(__a, __b); } static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __crc32w(uint32_t a, uint32_t b) { - return __builtin_arm_crc32w(a, b); +__crc32w(uint32_t __a, uint32_t __b) { + return __builtin_arm_crc32w(__a, __b); } static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __crc32d(uint32_t a, uint64_t b) { - return __builtin_arm_crc32d(a, b); +__crc32d(uint32_t __a, uint64_t __b) { + return __builtin_arm_crc32d(__a, __b); } static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __crc32cb(uint32_t a, uint8_t b) { - return __builtin_arm_crc32cb(a, b); +__crc32cb(uint32_t __a, uint8_t __b) { + return __builtin_arm_crc32cb(__a, __b); } static __inline__ uint32_t 
__attribute__((__always_inline__, __nodebug__)) - __crc32ch(uint32_t a, uint16_t b) { - return __builtin_arm_crc32ch(a, b); +__crc32ch(uint32_t __a, uint16_t __b) { + return __builtin_arm_crc32ch(__a, __b); } static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __crc32cw(uint32_t a, uint32_t b) { - return __builtin_arm_crc32cw(a, b); +__crc32cw(uint32_t __a, uint32_t __b) { + return __builtin_arm_crc32cw(__a, __b); } static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) - __crc32cd(uint32_t a, uint64_t b) { - return __builtin_arm_crc32cd(a, b); +__crc32cd(uint32_t __a, uint64_t __b) { + return __builtin_arm_crc32cd(__a, __b); } #endif diff --git a/c_headers/arm_neon.h b/c_headers/arm_neon.h new file mode 100644 index 000000000..f5ca59bb3 --- /dev/null +++ b/c_headers/arm_neon.h @@ -0,0 +1,69231 @@ +/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_NEON_H +#define __ARM_NEON_H + +#if !defined(__ARM_NEON) +#error "NEON support not enabled" +#endif + +#include + +typedef float float32_t; +typedef __fp16 float16_t; +#ifdef __aarch64__ +typedef double float64_t; +#endif + +#ifdef __aarch64__ +typedef uint8_t poly8_t; +typedef uint16_t poly16_t; +typedef uint64_t poly64_t; +typedef __uint128_t poly128_t; +#else +typedef int8_t poly8_t; +typedef int16_t poly16_t; +#endif +typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t; +typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t; +typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t; +typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t; +typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t; +typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t; +typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t; +typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t; +typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t; +typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t; +typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t; +typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t; +typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t; +typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t; +typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t; +typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t; +typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t; +typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t; +typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t; +typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t; +#ifdef __aarch64__ +typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t; +typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; +#endif +typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t; +typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t; +typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t; +typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t; +#ifdef __aarch64__ +typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t; +typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t; +#endif + +typedef struct int8x8x2_t { + int8x8_t val[2]; +} int8x8x2_t; + +typedef struct int8x16x2_t { + int8x16_t val[2]; +} int8x16x2_t; + +typedef struct int16x4x2_t { + int16x4_t val[2]; +} int16x4x2_t; + +typedef struct int16x8x2_t { + int16x8_t val[2]; +} int16x8x2_t; + +typedef struct int32x2x2_t { + int32x2_t val[2]; +} int32x2x2_t; + +typedef struct int32x4x2_t { + int32x4_t val[2]; +} int32x4x2_t; + +typedef struct int64x1x2_t { + int64x1_t val[2]; +} int64x1x2_t; + +typedef struct int64x2x2_t { + int64x2_t val[2]; +} int64x2x2_t; + +typedef struct uint8x8x2_t { + uint8x8_t val[2]; +} uint8x8x2_t; + +typedef struct uint8x16x2_t { + uint8x16_t val[2]; +} uint8x16x2_t; + +typedef struct uint16x4x2_t { + uint16x4_t val[2]; +} uint16x4x2_t; + +typedef struct uint16x8x2_t { + uint16x8_t val[2]; +} uint16x8x2_t; + +typedef struct uint32x2x2_t { + uint32x2_t val[2]; +} uint32x2x2_t; + +typedef struct uint32x4x2_t { + uint32x4_t val[2]; +} uint32x4x2_t; + +typedef struct uint64x1x2_t { + uint64x1_t val[2]; +} uint64x1x2_t; + +typedef struct uint64x2x2_t { + 
uint64x2_t val[2]; +} uint64x2x2_t; + +typedef struct float16x4x2_t { + float16x4_t val[2]; +} float16x4x2_t; + +typedef struct float16x8x2_t { + float16x8_t val[2]; +} float16x8x2_t; + +typedef struct float32x2x2_t { + float32x2_t val[2]; +} float32x2x2_t; + +typedef struct float32x4x2_t { + float32x4_t val[2]; +} float32x4x2_t; + +#ifdef __aarch64__ +typedef struct float64x1x2_t { + float64x1_t val[2]; +} float64x1x2_t; + +typedef struct float64x2x2_t { + float64x2_t val[2]; +} float64x2x2_t; + +#endif +typedef struct poly8x8x2_t { + poly8x8_t val[2]; +} poly8x8x2_t; + +typedef struct poly8x16x2_t { + poly8x16_t val[2]; +} poly8x16x2_t; + +typedef struct poly16x4x2_t { + poly16x4_t val[2]; +} poly16x4x2_t; + +typedef struct poly16x8x2_t { + poly16x8_t val[2]; +} poly16x8x2_t; + +#ifdef __aarch64__ +typedef struct poly64x1x2_t { + poly64x1_t val[2]; +} poly64x1x2_t; + +typedef struct poly64x2x2_t { + poly64x2_t val[2]; +} poly64x2x2_t; + +#endif +typedef struct int8x8x3_t { + int8x8_t val[3]; +} int8x8x3_t; + +typedef struct int8x16x3_t { + int8x16_t val[3]; +} int8x16x3_t; + +typedef struct int16x4x3_t { + int16x4_t val[3]; +} int16x4x3_t; + +typedef struct int16x8x3_t { + int16x8_t val[3]; +} int16x8x3_t; + +typedef struct int32x2x3_t { + int32x2_t val[3]; +} int32x2x3_t; + +typedef struct int32x4x3_t { + int32x4_t val[3]; +} int32x4x3_t; + +typedef struct int64x1x3_t { + int64x1_t val[3]; +} int64x1x3_t; + +typedef struct int64x2x3_t { + int64x2_t val[3]; +} int64x2x3_t; + +typedef struct uint8x8x3_t { + uint8x8_t val[3]; +} uint8x8x3_t; + +typedef struct uint8x16x3_t { + uint8x16_t val[3]; +} uint8x16x3_t; + +typedef struct uint16x4x3_t { + uint16x4_t val[3]; +} uint16x4x3_t; + +typedef struct uint16x8x3_t { + uint16x8_t val[3]; +} uint16x8x3_t; + +typedef struct uint32x2x3_t { + uint32x2_t val[3]; +} uint32x2x3_t; + +typedef struct uint32x4x3_t { + uint32x4_t val[3]; +} uint32x4x3_t; + +typedef struct uint64x1x3_t { + uint64x1_t val[3]; +} uint64x1x3_t; + +typedef struct uint64x2x3_t { + uint64x2_t val[3]; +} uint64x2x3_t; + +typedef struct float16x4x3_t { + float16x4_t val[3]; +} float16x4x3_t; + +typedef struct float16x8x3_t { + float16x8_t val[3]; +} float16x8x3_t; + +typedef struct float32x2x3_t { + float32x2_t val[3]; +} float32x2x3_t; + +typedef struct float32x4x3_t { + float32x4_t val[3]; +} float32x4x3_t; + +#ifdef __aarch64__ +typedef struct float64x1x3_t { + float64x1_t val[3]; +} float64x1x3_t; + +typedef struct float64x2x3_t { + float64x2_t val[3]; +} float64x2x3_t; + +#endif +typedef struct poly8x8x3_t { + poly8x8_t val[3]; +} poly8x8x3_t; + +typedef struct poly8x16x3_t { + poly8x16_t val[3]; +} poly8x16x3_t; + +typedef struct poly16x4x3_t { + poly16x4_t val[3]; +} poly16x4x3_t; + +typedef struct poly16x8x3_t { + poly16x8_t val[3]; +} poly16x8x3_t; + +#ifdef __aarch64__ +typedef struct poly64x1x3_t { + poly64x1_t val[3]; +} poly64x1x3_t; + +typedef struct poly64x2x3_t { + poly64x2_t val[3]; +} poly64x2x3_t; + +#endif +typedef struct int8x8x4_t { + int8x8_t val[4]; +} int8x8x4_t; + +typedef struct int8x16x4_t { + int8x16_t val[4]; +} int8x16x4_t; + +typedef struct int16x4x4_t { + int16x4_t val[4]; +} int16x4x4_t; + +typedef struct int16x8x4_t { + int16x8_t val[4]; +} int16x8x4_t; + +typedef struct int32x2x4_t { + int32x2_t val[4]; +} int32x2x4_t; + +typedef struct int32x4x4_t { + int32x4_t val[4]; +} int32x4x4_t; + +typedef struct int64x1x4_t { + int64x1_t val[4]; +} int64x1x4_t; + +typedef struct int64x2x4_t { + int64x2_t val[4]; +} int64x2x4_t; + +typedef struct 
uint8x8x4_t { + uint8x8_t val[4]; +} uint8x8x4_t; + +typedef struct uint8x16x4_t { + uint8x16_t val[4]; +} uint8x16x4_t; + +typedef struct uint16x4x4_t { + uint16x4_t val[4]; +} uint16x4x4_t; + +typedef struct uint16x8x4_t { + uint16x8_t val[4]; +} uint16x8x4_t; + +typedef struct uint32x2x4_t { + uint32x2_t val[4]; +} uint32x2x4_t; + +typedef struct uint32x4x4_t { + uint32x4_t val[4]; +} uint32x4x4_t; + +typedef struct uint64x1x4_t { + uint64x1_t val[4]; +} uint64x1x4_t; + +typedef struct uint64x2x4_t { + uint64x2_t val[4]; +} uint64x2x4_t; + +typedef struct float16x4x4_t { + float16x4_t val[4]; +} float16x4x4_t; + +typedef struct float16x8x4_t { + float16x8_t val[4]; +} float16x8x4_t; + +typedef struct float32x2x4_t { + float32x2_t val[4]; +} float32x2x4_t; + +typedef struct float32x4x4_t { + float32x4_t val[4]; +} float32x4x4_t; + +#ifdef __aarch64__ +typedef struct float64x1x4_t { + float64x1_t val[4]; +} float64x1x4_t; + +typedef struct float64x2x4_t { + float64x2_t val[4]; +} float64x2x4_t; + +#endif +typedef struct poly8x8x4_t { + poly8x8_t val[4]; +} poly8x8x4_t; + +typedef struct poly8x16x4_t { + poly8x16_t val[4]; +} poly8x16x4_t; + +typedef struct poly16x4x4_t { + poly16x4_t val[4]; +} poly16x4x4_t; + +typedef struct poly16x8x4_t { + poly16x8_t val[4]; +} poly16x8x4_t; + +#ifdef __aarch64__ +typedef struct poly64x1x4_t { + poly64x1_t val[4]; +} poly64x1x4_t; + +typedef struct poly64x2x4_t { + poly64x2_t val[4]; +} poly64x2x4_t; + +#endif + +#define __ai static inline __attribute__((__always_inline__, __nodebug__)) + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return 
__ret; +} +#else +__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t 
__ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { + 
int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 
34); + return __ret; +} +#else +__ai int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint32x4_t 
vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} 
+#else +__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + 
__ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} 
+#else +__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t 
vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = 
__rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { + poly16x4_t __ret; + __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5); + return __ret; +} +#else +__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { + poly16x8_t 
__ret; + __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37); + return __ret; +} +#else +__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t 
__p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#else +__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); + return __ret; +} +#else +__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17); + return __ret; +} +#else +__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); + return __ret; +} +#else +__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t 
__ret; + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return 
__ret; +} +#else +__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return 
__ret; +} +#else +__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + 
uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 > __rev1); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + 
return __ret; +} +#else +__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + 
__ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; 
+} +#else +__ai int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t 
__ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t 
vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vclzq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vclzq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vclzq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vclzq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vclzq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vclzq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vclz_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vclz_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; 
+} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vclz_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vclz_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vclz_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vclz_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vclz_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vclz_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vclz_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vcreate_p8(uint64_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vcreate_p8(uint64_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t 
vcreate_p16(uint64_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vcreate_p16(uint64_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcreate_u8(uint64_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vcreate_u8(uint64_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcreate_u32(uint64_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vcreate_u32(uint64_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcreate_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vcreate_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcreate_u16(uint64_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vcreate_u16(uint64_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vcreate_s8(uint64_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vcreate_s8(uint64_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcreate_f32(uint64_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vcreate_f32(uint64_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcreate_f16(uint64_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vcreate_f16(uint64_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcreate_s32(uint64_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vcreate_s32(uint64_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vcreate_s64(uint64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vcreate_s64(uint64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vcreate_s16(uint64_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vcreate_s16(uint64_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = 
(float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
+  return __ret;
+}
+#else
+__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  float32x4_t __ret;
+  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
+  return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
+  return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  float32x2_t __ret;
+  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
+  uint32x4_t __s0 = __p0; \
+  uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#else
+#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
+  int32x4_t __s0 = __p0; \
+  int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  float32x4_t __ret; \
+  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#else
+#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+  uint32x2_t __s0 = __p0; \
+  uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+  int32x2_t __s0 = __p0; \
+  int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  float32x2_t __ret; \
+  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
+  __ret; \
+})
+#else
+#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  int32x4_t __ret; \
+  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
+  __ret; \
+})
+#else
+#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  int32x2_t __ret; \
+  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
+  __ret; \
+})
+#else
+#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+  float32x4_t __s0 = __p0; \
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+  uint32x4_t __ret; \
+  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
+  __ret; \
+})
+#else
+#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+  float32x2_t __s0 = __p0; \
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+  uint32x2_t __ret; \
+  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+  __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
+  return __ret;
+}
+#else
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
+  float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+  int32x4_t __ret;
+  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
+  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
+  return __ret;
+}
+#else
+__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
+  float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+  int32x2_t __ret;
+  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
+  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+  return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai
uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x16_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 
2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ 
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u16(__p0, __p1) 
__extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 
6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vdup_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#else +__ai uint64x1_t vdup_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdup_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vdup_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#else +__ai int64x1_t vdup_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t 
__p1) { + uint32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 
6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \ + __ret; \ +}) +#else +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret 
= (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#else +#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \ + __ret; \ +}) +#else +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, 
(int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#else +#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t 
__noswap_vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { + uint64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai int8x8_t vget_high_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai float32x2_t vget_high_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai float16x4_t vget_high_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x4_t __ret; + __ret = 
__builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai int32x2_t vget_high_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vget_high_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai int64x1_t vget_high_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai int16x4_t vget_high_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, 
__p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ 
+ uint16x8_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t 
__s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai poly8x8_t 
vget_low_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} +#else +__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { + uint64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vget_low_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai int8x8_t vget_low_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vget_low_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} +#else +__ai float32x2_t vget_low_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vget_low_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai float16x4_t vget_low_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vget_low_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} +#else +__ai int32x2_t vget_low_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vget_low_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai int64x1_t vget_low_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vget_low_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai int16x4_t vget_low_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + 
uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t 
vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t 
vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ + __ret; \ +}) +#else +#define 
vld1q_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ + __ret; \ +}) +#else +#define vld1_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) 
__builtin_neon_vld1_v(__p0, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \ + __ret; \ +}) +#else +#define vld1_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ + __ret; \ +}) +#else +#define vld1_dup_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ + __ret; \ +}) +#else +#define vld1_dup_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ + __ret; \ +}) +#else +#define vld1q_dup_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ 
+}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ + __ret; \ +}) +#else +#define vld1q_dup_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ + __ret; \ +}) +#else +#define vld1q_dup_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ + __ret; \ +}) +#else +#define vld1q_dup_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ + __ret; \ +}) +#else +#define vld1q_dup_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ + __ret; \ +}) +#else +#define vld1q_dup_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ + __ret; \ +}) +#else +#define vld1q_dup_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ + __ret; \ +}) +#else +#define vld1q_dup_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ + __ret; \ +}) +#else +#define vld1q_dup_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ + __ret; \ +}) +#else +#define vld1q_dup_s32(__p0) 
__extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ + __ret; \ +}) +#else +#define vld1q_dup_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ + __ret; \ +}) +#else +#define vld1q_dup_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ + __ret; \ +}) +#else +#define vld1_dup_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ + __ret; \ +}) +#else +#define vld1_dup_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ + __ret; \ +}) +#else +#define vld1_dup_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ + __ret; \ +}) +#else +#define vld1_dup_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ + __ret; \ +}) +#else +#define vld1_dup_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ + __ret; \ +}) +#else +#define vld1_dup_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ + __ret; \ +}) +#else +#define vld1_dup_f16(__p0) __extension__ ({ \ + float16x4_t 
__ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ + __ret; \ +}) +#else +#define vld1_dup_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ + __ret; \ +}) +#else +#define vld1_dup_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ + __ret; \ +}) +#else +#define vld1_dup_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 
= __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ + __ret; \ +}) +#else +#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) 
__builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#else +#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ + __ret; \ +}) +#else +#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, 
(int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#else +#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld2_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld2_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld2q_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld2q_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 37); \ + \ + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld2q_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld2q_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld2q_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld2q_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld2q_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld2q_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld2q_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 34); \ + \ + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld2q_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld2_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld2_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld2_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld2_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld2_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld2_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld2_f16(__p0) __extension__ ({ \ + 
float16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld2_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld2_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld2_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld2_dup_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld2_dup_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld2_dup_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld2_dup_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + 
__builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld2_dup_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld2_dup_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld2_dup_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld2_dup_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld2_dup_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld2_dup_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld2_dup_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld2_dup_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __ret; \ 
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ + __ret; \ +}) +#else +#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ + __ret; \ +}) +#else +#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ + __ret; \ +}) +#else +#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ + __ret; \ +}) +#else +#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t 
__s1 = __p1; \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ + __ret; \ +}) +#else +#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \ + __ret; \ +}) +#else +#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \ + __ret; \ +}) +#else +#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \ + __ret; \ +}) +#else +#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __ret; \ + 
__builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \ + __ret; \ +}) +#else +#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ + __ret; \ +}) +#else +#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ + __ret; \ +}) +#else +#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ + __ret; \ +}) +#else +#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ + __ret; \ +}) +#else +#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \ + __ret; \ +}) +#else +#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \ + __ret; \ +}) +#else +#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \ + __ret; \ +}) +#else +#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \ + __ret; \ +}) +#else +#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld3q_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld3_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld3_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld3_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld3_dup_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld3_dup_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + 
__builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld3_dup_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ + __ret; \ +}) +#else +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ + __ret; \ +}) +#else +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ + __ret; \ +}) +#else +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ + __ret; \ +}) +#else +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ + __ret; \ +}) +#else +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = 
__p1; \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \ + __ret; \ +}) +#else +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \ + __ret; \ +}) +#else +#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \ + __ret; \ +}) +#else +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \ + __ret; \ +}) +#else +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ + __ret; \ +}) +#else +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ + __ret; \ +}) +#else +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ + __ret; \ +}) +#else +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ + __ret; \ +}) +#else +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \ + __ret; \ +}) +#else +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \ + __ret; \ +}) +#else +#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \ + __ret; \ +}) +#else +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \ + __ret; \ +}) +#else +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + 
__builtin_neon_vld4q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + __ret; \ 
+}) +#else +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld4q_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + 
__builtin_neon_vld4_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld4_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld4_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, 
__p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld4_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + \ + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld4_dup_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld4_dup_f16(__p0) __extension__ ({ \ + float16x4x4_t 
__ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld4_dup_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld4_dup_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ + __ret; \ +}) +#else +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __ret; \ + 
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ + __ret; \ +}) +#else +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ + __ret; \ +}) +#else +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ + __ret; \ +}) +#else +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ + __ret; \ +}) +#else +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \ + __ret; \ +}) +#else +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \ + __ret; \ +}) +#else +#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 
2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \ + __ret; \ +}) +#else +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \ + __ret; \ +}) +#else +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 
(int8x8_t)__s1.val[3], __p2, 16); \ + __ret; \ +}) +#else +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ + __ret; \ +}) +#else +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ + __ret; \ +}) +#else +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ + __ret; \ +}) +#else +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \ + __ret; \ +}) +#else +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \ + __ret; \ +}) +#else +#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], 
__rev1.val[2], __rev1.val[3], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \ + __ret; \ +}) +#else +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \ + __ret; \ +}) +#else +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; 
+} +#else +__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 
32); + return __ret; +} +#else +__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint8x16_t 
vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __p1 * 
__p2; + return __ret; +} +#else +__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x2_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint16x8_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 
1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x8_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint32x2_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_lane_u32(__p0, __p1, __p2, __p3) 
__extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + uint32x2_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint16x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32x2_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int32x2_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; 
\ + int16x4_t __s2 = __p2; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 
3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 
= __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x2_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint16x8_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2; 
__rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x8_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint32x2_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint32x2_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + uint32x2_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint16x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32x2_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int32x2_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; 
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmov_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint8x8_t vmov_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmov_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai uint32x2_t vmov_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vmov_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#else +__ai uint64x1_t vmov_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmov_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai uint16x4_t vmov_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmov_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, 
__p0, __p0, __p0}; + return __ret; +} +#else +__ai int8x8_t vmov_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmov_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float32x2_t vmov_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmov_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmov_n_f16(__p0) __extension__ ({ \ + float16_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmov_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai int32x2_t vmov_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vmov_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#else +__ai int64x1_t vmov_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmov_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai int16x4_t vmov_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { + 
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vmovl_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vmovl_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vmovl_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) 
__builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vmovn_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vmovn_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vmovn_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) 
__builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + 
int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = __s0 * 
__builtin_shufflevector(__s1, __s1, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __ret; + __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __ret; + __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __ret; + __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int16x8_t 
vmulq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __ret; + __ret = __p0 * (uint32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = __rev0 * (uint32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __ret; + __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __ret; + __ret = __p0 * (float32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = __rev0 * (float32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = __p0 * (int32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = __rev0 * (int32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); + return __ret; 
+} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} 
+#else +__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51); + return __ret; +} +#else +__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50); + return __ret; +} +#else +__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); + return __ret; +} +#else +__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); + return __ret; +} +#else +__ai int32x4_t vmull_n_s16(int16x4_t __p0, 
int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmvnq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int32x4_t vmvnq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int16x8_t vmvnq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int16x8_t vmvnq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int32x2_t vneg_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vornq_u64(uint64x2_t __p0, 
uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + 
uint8x8_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai 
int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else 
+__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int64x1_t 
__ret; + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) 
__builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpaddl_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vpaddl_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vpaddl_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vpaddl_s32(int32x2_t __p0) { + 
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpaddl_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vpaddl_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret 
= (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = 
(int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + 
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqadd_s32(int32x2_t 
__p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) 
__builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); + return __ret; +} +#else +__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); + return __ret; +} +#else +__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t 
__noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + 
int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34); + return __ret; +} +#else +__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33); + return __ret; +} +#else +__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2); + return __ret; +} +#else +__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1); + return __ret; +} +#else +__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + 
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); + return __ret; +} +#else +__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); + return __ret; +} +#else +__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return 
__ret; +} +__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqmovn_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqmovn_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqmovn_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = 
(uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, 
__p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34); + return __ret; +} +#else +__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33); + return __ret; +} +#else +__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2); + return __ret; +} +#else +__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1); + return __ret; +} +#else +__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqrshl_s8(int8x8_t 
__p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) 
__builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = 
__p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t 
vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai 
int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ + 
uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + 
__ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 
= __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqshrn_n_s32(__p0, __p1) 
__extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = 
(uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 
3); + return __ret; +} +#else +__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 
7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + 
__ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrev32q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai int16x8_t vrev32q_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 
7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev32_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai int8x8_t vrev32_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrev32_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai int16x4_t vrev32_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 
1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} 
+#else +__ai int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __ret; + __ret = 
__builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai 
int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) 
__builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t 
vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + 
__ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + 
uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrsqrte_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrsqrte_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ 
+}) +#else +#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) 
__builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#else +#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) 
__builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#else +#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) 
__builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_p16(__p0, __p1, 
__p2) __extension__ ({ \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ 
\ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + 
__ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#define __noswap_vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#define __noswap_vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + 
int64_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + 
uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) 
__builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define vshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t 
__ret; \ + __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define vshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) 
__builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t 
__ret; \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define vshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = 
__p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define vshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) 
__builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + 
__ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + 
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + 
uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#else +#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#else +#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#else +#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + 
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#else +#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 
= __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) 
+#else +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#else +#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, 
__p2, 0); \ + __ret; \ +}) +#else +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#else +#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \ +}) +#else +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \ +}) +#else +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \ +}) +#else +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = 
__p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \ +}) +#else +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \ +}) +#else +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \ +}) +#else +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \ +}) +#else +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \ +}) +#else +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \ +}) +#else +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \ +}) +#else +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \ +}) +#else +#define vst1q_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \ +}) +#else +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \ +}) +#else +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \ +}) +#else +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \ +}) +#else +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \ +}) +#else +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ +}) +#else +#define vst1_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \ +}) +#else +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \ +}) +#else +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \ +}) +#else +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + 
__builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \ +}) +#else +#define vst1_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \ +}) +#else +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ +}) +#else +#define vst1_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \ +}) +#else +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ +}) +#else +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ +}) +#else +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ +}) +#else +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ +}) +#else +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ +}) +#else +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ +}) +#else +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ +}) +#else +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ +}) +#else +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ +}) +#else +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ +}) +#else +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ +}) +#else +#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ +}) +#else +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ +}) +#else +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 
= __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ +}) +#else +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ +}) +#else +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ +}) +#else +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ +}) +#else +#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ +}) +#else +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ +}) +#else +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ +}) +#else +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ +}) +#else +#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + 
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ +}) +#else +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ +}) +#else +#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ +}) +#else +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_p8(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +}) +#else +#define vst2_p8(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_p16(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +}) +#else +#define vst2_p16(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +}) +#else +#define vst2q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +}) +#else +#define vst2q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +}) +#else +#define vst2q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +}) +#else +#define vst2q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +}) +#else +#define vst2q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s8(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +}) +#else +#define vst2q_s8(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f32(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \ +}) +#else +#define vst2q_f32(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f16(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \ +}) +#else +#define vst2q_f16(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s32(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \ +}) +#else +#define vst2q_s32(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s16(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \ +}) +#else +#define vst2q_s16(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_u8(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +}) +#else +#define vst2_u8(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_u32(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +}) +#else +#define vst2_u32(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_u64(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) +#else +#define vst2_u64(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_u16(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +}) +#else +#define vst2_u16(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_s8(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + 
__builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +}) +#else +#define vst2_s8(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_f32(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \ +}) +#else +#define vst2_f32(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_f16(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \ +}) +#else +#define vst2_f16(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_s32(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \ +}) +#else +#define vst2_s32(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_s64(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \ +}) +#else +#define vst2_s64(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_s16(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \ +}) +#else +#define vst2_s16(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ +}) +#else +#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ +}) +#else +#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ +}) +#else +#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ +}) +#else +#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ +}) +#else +#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \ +}) +#else +#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \ +}) +#else +#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \ +}) +#else +#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \ +}) +#else +#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ +}) +#else +#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ +}) +#else +#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ +}) +#else +#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ +}) +#else +#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \ +}) +#else +#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \ +}) +#else +#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \ +}) +#else +#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \ +}) +#else +#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_p8(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +}) +#else +#define vst3_p8(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_p16(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +}) +#else +#define vst3_p16(__p0, __p1) __extension__ ({ \ + 
poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +}) +#else +#define vst3q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +}) +#else +#define vst3q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +}) +#else +#define vst3q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +}) +#else +#define vst3q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +}) +#endif + 
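(Illustrative aside, not part of the vendored header or of this diff: the vst1q_lane_*, vst2q_* and vst3q_* families defined in the hunk above are normally reached through <arm_neon.h>. The sketch below shows typical call sites; the function and variable names are invented for the example.)

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Store a single lane, an interleaved pair of vectors, and three
 * interleaved byte planes, using intrinsics defined above. */
void neon_store_sketch(int32_t *one_lane, uint32_t *pairs,
                       uint8_t *rgb, const uint8_t *r, const uint8_t *g,
                       const uint8_t *b, int32x4_t v, uint32x4x2_t uv,
                       size_t n /* assumed to be a multiple of 16 */)
{
    vst1q_lane_s32(one_lane, v, 2);   /* writes only lane 2 of v */
    vst2q_u32(pairs, uv);             /* uv.val[0]/uv.val[1] interleaved */

    for (size_t i = 0; i < n; i += 16) {
        uint8x16x3_t px;
        px.val[0] = vld1q_u8(r + i);
        px.val[1] = vld1q_u8(g + i);
        px.val[2] = vld1q_u8(b + i);
        vst3q_u8(rgb + 3 * i, px);    /* R,G,B,R,G,B,... for 16 pixels */
    }
}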
+#ifdef __LITTLE_ENDIAN__ +#define vst3q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) +#else +#define vst3q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s8(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ +}) +#else +#define vst3q_s8(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f32(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \ +}) +#else +#define vst3q_f32(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f16(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \ +}) +#else +#define vst3q_f16(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s32(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \ +}) +#else +#define vst3q_s32(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \ +}) +#endif + 
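(Another illustrative aside, not part of the diff: every #else branch above follows the same pattern, lane-reversing the operand (or each .val[k] element) with __builtin_shufflevector before handing it to the store builtin, apparently to compensate for the reversed in-register lane numbering clang uses on big-endian targets. A minimal stand-alone version of that reversal:)

#include <arm_neon.h>

/* Lane i of the result is lane (3 - i) of v -- the same reversal the
 * big-endian branches apply before calling the vst builtins. */
uint32x4_t reverse_lanes_u32(uint32x4_t v)
{
    return __builtin_shufflevector(v, v, 3, 2, 1, 0);
}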
+#ifdef __LITTLE_ENDIAN__ +#define vst3q_s16(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \ +}) +#else +#define vst3q_s16(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_u8(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +}) +#else +#define vst3_u8(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_u32(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +}) +#else +#define vst3_u32(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_u64(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#else +#define vst3_u64(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_u16(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +}) +#else +#define vst3_u16(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_s8(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +}) +#else +#define vst3_s8(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_f32(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \ +}) +#else +#define vst3_f32(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_f16(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \ +}) +#else +#define vst3_f16(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_s32(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \ +}) +#else +#define vst3_s32(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_s64(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \ +}) +#else +#define vst3_s64(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_s16(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \ +}) +#else +#define vst3_s16(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ +}) +#else +#define vst3_lane_p8(__p0, __p1, __p2) 
__extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ +}) +#else +#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ +}) +#else +#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ +}) +#else +#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ +}) +#else +#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 
49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \ +}) +#else +#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \ +}) +#else +#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \ +}) +#else +#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \ +}) +#else +#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ +}) +#else +#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], 
(int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ +}) +#else +#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ +}) +#else +#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ +}) +#else +#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \ +}) +#else +#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \ +}) +#else +#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 
3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \ +}) +#else +#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \ +}) +#else +#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_p8(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +}) +#else +#define vst4_p8(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_p16(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +}) +#else +#define vst4_p16(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +}) +#else +#define vst4q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +}) +#else +#define vst4q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) +#else +#define vst4q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +}) +#else +#define vst4q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) +#else +#define vst4q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s8(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) +#else +#define vst4q_s8(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f32(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \ +}) +#else +#define vst4q_f32(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f16(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \ +}) +#else +#define vst4q_f16(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s32(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \ +}) +#else +#define vst4q_s32(__p0, __p1) __extension__ 
({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s16(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \ +}) +#else +#define vst4q_s16(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_u8(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +}) +#else +#define vst4_u8(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_u32(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +}) +#else +#define vst4_u32(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_u64(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#else +#define vst4_u64(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_u16(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = 
__p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +}) +#else +#define vst4_u16(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s8(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +}) +#else +#define vst4_s8(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f32(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \ +}) +#else +#define vst4_f32(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f16(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \ +}) +#else +#define vst4_f16(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s32(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \ +}) +#else +#define vst4_s32(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + 
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s64(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \ +}) +#else +#define vst4_s64(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s16(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \ +}) +#else +#define vst4_s16(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ +}) +#else +#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ +}) +#else +#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ +}) +#else +#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + 
poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ +}) +#else +#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ +}) +#else +#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \ +}) +#else +#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \ +}) +#else +#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \ +}) +#else +#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \ +}) +#else +#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ +}) +#else +#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ +}) +#else +#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ +}) +#else +#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ +}) +#else +#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \ +}) +#else +#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \ +}) +#else +#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \ +}) +#else +#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \ +}) +#else +#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} 
+#else +__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 - __rev1; + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 
2, 1, 0); + int16x4_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t 
vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = vmovl_u8(__p0) - vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = vmovl_u32(__p0) - vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = vmovl_u16(__p0) - vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = vmovl_s8(__p0) - vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t 
vsubl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = vmovl_s32(__p0) - vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = vmovl_s16(__p0) - vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 - __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 - __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = __p0 - vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 
5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 - __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { + poly8x8x2_t __rev0; + __rev0.val[0] = 
__builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { + uint8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { + int8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { + poly8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { + uint8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], 
__p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { + int8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { + poly8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { + uint8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) 
__builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { + int8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 
0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t 
__p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x4_t 
__rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8x2_t 
vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, 
(int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vtrn_s8(int8x8_t 
__p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + uint16x4_t __ret; + __ret = 
(uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + 
return __ret; +} +#else +__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + 
__builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + 
float32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#if !defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} 
+#else +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { 
+ poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + 
__ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret 
= (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); 
+ return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = 
(float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = 
(int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + 
int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t 
vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + 
uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = 
(uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return 
__ret; +} +#else +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; 
+ __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = 
(int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + 
int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#endif +#if (__ARM_FP & 2) +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8); + return __ret; +} +#else +__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) 
__builtin_neon_vrndn_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t 
__p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { + 
int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#else 
+__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + 
__ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#else +__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + 
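A hedged usage sketch, not part of the patched arm_neon.h above: the vreinterpret_* helpers this hunk defines are pure bit-pattern casts between same-width NEON vector types (no lane conversion or reordering), and the snippet below only illustrates that, assuming an AArch64 target where these definitions are enabled; the names reinterpret_demo, bits, and poly are invented for illustration.

/* Illustrative only -- not part of the generated header being added in this diff. */
#include <arm_neon.h>

static inline poly64x1_t reinterpret_demo(uint64x1_t bits) {
    /* Same 64 bits, now viewed as a polynomial vector (e.g. as input to
     * polynomial-multiply intrinsics); no data is converted. */
    poly64x1_t poly = vreinterpret_p64_u64(bits);
    return poly;
}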
+#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#else +__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t 
vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#else +__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t 
vreinterpretq_p128_u32(uint32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#else +__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t 
__p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#else +__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t 
vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#else +__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t 
vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#else +__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t 
vreinterpretq_u32_f64(float64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#else +__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#else +__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#else +__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#else +__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { + float64x2_t __ret; + __ret = 
(float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#else +__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { + float32x4_t __ret; + __ret = 
(float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#else +__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai 
float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#else +__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else 
+__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#else +__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + 
int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#else +__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#else +__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t 
vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#else +__ai 
uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t 
vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#else +__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t 
vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#else +__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t 
vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#else +__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#else +__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t 
vreinterpret_f64_f32(float32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#else +__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret 
= (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#else +__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = 
(int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#else +__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t 
vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#else +__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return 
__ret; +} +#else +__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#else +__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrnd_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t 
vrnd_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrnda_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vrnda_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrndi_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vrndi_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrndm_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vrndm_f64(float64x1_t __p0) 
{ + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrndn_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vrndn_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrndp_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vrndp_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrndx_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vrndx_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) 
__builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#else +__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#else +__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif + +#endif +#if __ARM_FEATURE_CRYPTO +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2); + return __ret; +} +#else +__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vsha1h_u32(uint32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); + return __ret; +} +#else +__ai uint32_t vsha1h_u32(uint32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2); + return __ret; +} +#else +__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2); + return __ret; +} +#else +__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_FMA) +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float32x2_t __ret; + __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_QRDMX) +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) 
__extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x8_t __ret; \ + __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __ret; \ + __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int32x2_t __ret; \ + __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __ret; \ + __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int32x4_t __ret; \ + 
__ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x8_t __ret; \ + __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __ret; \ + __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int32x2_t __ret; \ + __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __ret; \ + __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#endif +#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ + __ret; \ 
+}) +#else +#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __ret; \ + __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x2_t __ret; \ + __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x4_t __ret; \ + __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __ret; \ + __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x2_t __ret; \ + __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x4_t __ret; \ + __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ + __ret; \ +}) +#else +#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x4_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#endif +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#else +__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); + return __ret; +} +#else +__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); + return __ret; +} +#else +__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vabs_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vabs_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) 
__builtin_neon_vabs_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); + return __ret; +} +#else +__ai int64_t vabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2)); + 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0); + return __ret; +} +#else +__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0); + return __ret; +} +#else +__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0); + return __ret; +} +#else +__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddlvq_s8(int8x16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0); + return __ret; +} +#else +__ai int16_t vaddlvq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddlvq_s32(int32x4_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0); + return __ret; +} +#else +__ai int64_t vaddlvq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddlvq_s16(int16x8_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0); + return __ret; +} +#else +__ai int32_t vaddlvq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddlv_u8(uint8x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0); + return __ret; +} +#else +__ai uint16_t vaddlv_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddlv_u32(uint32x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0); + return __ret; +} +#else +__ai uint64_t vaddlv_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddlv_u16(uint16x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0); + return __ret; +} +#else +__ai uint32_t vaddlv_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddlv_s8(int8x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0); + return __ret; +} 
+#else +__ai int16_t vaddlv_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddlv_s32(int32x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0); + return __ret; +} +#else +__ai int64_t vaddlv_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddlv_s16(int16x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0); + return __ret; +} +#else +__ai int32_t vaddlv_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0); + return __ret; +} +#else +__ai uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0); + return __ret; +} +#else +__ai uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vaddvq_u64(uint64x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0); + return __ret; +} +#else +__ai uint64_t vaddvq_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0); + return __ret; +} +#else +__ai uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vaddvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0); + return __ret; +} +#else +__ai int8_t vaddvq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vaddvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vaddvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) 
__builtin_neon_vaddvq_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vaddvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0); + return __ret; +} +#else +__ai float32_t vaddvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0); + return __ret; +} +#else +__ai int32_t vaddvq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vaddvq_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0); + return __ret; +} +#else +__ai int64_t vaddvq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0); + return __ret; +} +#else +__ai int16_t vaddvq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vaddv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0); + return __ret; +} +#else +__ai uint8_t vaddv_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vaddv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0); + return __ret; +} +#else +__ai uint32_t vaddv_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vaddv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0); + return __ret; +} +#else +__ai uint16_t vaddv_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vaddv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0); + return __ret; +} +#else +__ai int8_t vaddv_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vaddv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai 
float32_t vaddv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vaddv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0); + return __ret; +} +#else +__ai int32_t vaddv_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vaddv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0); + return __ret; +} +#else +__ai int16_t vaddv_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); + return __ret; +} +#else +__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); + return __ret; +} +#else +__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#else +__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t 
vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vceq_p64(poly64x1_t __p0, 
poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#else +__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vceqd_u64(uint64_t __p0, 
uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vceqz_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vceqz_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceqz_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vceqz_p16(poly16x4_t __p0) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; 
+} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) 
__builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vceqz_u64(uint64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vceqz_u64(uint64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = 
(uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vceqz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vceqz_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vceqz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vceqz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vceqz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vceqz_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vceqz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vceqz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vceqz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vceqz_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vceqzd_u64(uint64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); + return __ret; +} +#else +__ai uint64_t vceqzd_u64(uint64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vceqzd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0); + return __ret; +} +#else +__ai int64_t vceqzd_s64(int64_t __p0) { + int64_t __ret; + 
__ret = (int64_t) __builtin_neon_vceqzd_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vceqzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); + return __ret; +} +#else +__ai uint64_t vceqzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vceqzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); + return __ret; +} +#else +__ai uint32_t vceqzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +#else +__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) 
__builtin_neon_vcged_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) 
__builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcgez_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcgez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcgez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgez_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgez_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcgez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcgez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcgez_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcgezd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0); + return __ret; +} +#else +__ai int64_t vcgezd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0); 
+ return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcgezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); + return __ret; +} +#else +__ai uint64_t vcgezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcgezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); + return __ret; +} +#else +__ai uint32_t vcgezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +#else +__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcgtz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcgtz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcgtz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcgtz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcgtzd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0); + return __ret; +} +#else +__ai int64_t vcgtzd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcgtzd_f64(float64_t __p0) { + uint64_t __ret; + 
__ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); + return __ret; +} +#else +__ai uint64_t vcgtzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcgtzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); + return __ret; +} +#else +__ai uint32_t vcgtzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +#else +__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) 
__builtin_neon_vcled_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vclezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vclezq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vclezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vclezq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vclezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vclezq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vclezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vclezq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vclezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vclezq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vclezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + 
__ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t vclezq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vclez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vclez_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vclez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vclez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vclez_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vclez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vclez_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vclez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vclez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vclez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vclez_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vclezd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vclezd_s64(__p0); + return __ret; +} +#else +__ai int64_t vclezd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vclezd_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vclezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); + return __ret; +} +#else +__ai uint64_t 
vclezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vclezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); + return __ret; +} +#else +__ai uint32_t vclezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +#else +__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vcltd_s64(int64_t __p0, int64_t 
__p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai uint16x8_t 
vcltzq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vcltz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vcltz_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcltz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcltz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcltz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcltz_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vcltz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai uint32x2_t vcltz_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcltz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcltz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vcltz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai uint16x4_t vcltz_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcltzd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0); + return __ret; +} +#else +__ai int64_t vcltzd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcltzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); + return __ret; +} +#else +__ai uint64_t vcltzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcltzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); + return __ret; +} +#else +__ai uint32_t vcltzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p8(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \ + poly8x16_t __s0_0 = __p0_0; \ + poly8x8_t __s2_0 = __p2_0; \ + poly8x16_t __ret_0; \ + __ret_0 = vsetq_lane_p8(vget_lane_p8(__s2_0, __p3_0), __s0_0, __p1_0); \ + __ret_0; \ +}) +#else +#define vcopyq_lane_p8(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \ + poly8x16_t __s0_1 = __p0_1; \ + poly8x8_t __s2_1 = __p2_1; \ + poly8x16_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_1; __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret_1; \ + __ret_1 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_1, __p3_1), __rev0_1, __p1_1); \ + __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_1; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \ + poly16x8_t __s0_2 = __p0_2; \ + poly16x4_t __s2_2 = __p2_2; \ + poly16x8_t __ret_2; \ + __ret_2 = vsetq_lane_p16(vget_lane_p16(__s2_2, __p3_2), __s0_2, __p1_2); \ + __ret_2; \ +}) +#else +#define vcopyq_lane_p16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \ + poly16x8_t __s0_3 = __p0_3; \ + poly16x4_t __s2_3 = __p2_3; \ + poly16x8_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __rev2_3; __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \ + poly16x8_t __ret_3; \ + __ret_3 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_3, __p3_3), __rev0_3, __p1_3); \ + __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_3; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u8(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \ + uint8x16_t __s0_4 = __p0_4; \ + uint8x8_t __s2_4 = __p2_4; \ + uint8x16_t __ret_4; \ + __ret_4 = vsetq_lane_u8(vget_lane_u8(__s2_4, __p3_4), __s0_4, __p1_4); \ + __ret_4; \ +}) +#else +#define vcopyq_lane_u8(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \ + uint8x16_t __s0_5 = __p0_5; \ + uint8x8_t __s2_5 = __p2_5; \ + uint8x16_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_5; __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 
7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_5; \ + __ret_5 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_5, __p3_5), __rev0_5, __p1_5); \ + __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u32(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \ + uint32x4_t __s0_6 = __p0_6; \ + uint32x2_t __s2_6 = __p2_6; \ + uint32x4_t __ret_6; \ + __ret_6 = vsetq_lane_u32(vget_lane_u32(__s2_6, __p3_6), __s0_6, __p1_6); \ + __ret_6; \ +}) +#else +#define vcopyq_lane_u32(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \ + uint32x4_t __s0_7 = __p0_7; \ + uint32x2_t __s2_7 = __p2_7; \ + uint32x4_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \ + uint32x2_t __rev2_7; __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 1, 0); \ + uint32x4_t __ret_7; \ + __ret_7 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_7, __p3_7), __rev0_7, __p1_7); \ + __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 3, 2, 1, 0); \ + __ret_7; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u64(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \ + uint64x2_t __s0_8 = __p0_8; \ + uint64x1_t __s2_8 = __p2_8; \ + uint64x2_t __ret_8; \ + __ret_8 = vsetq_lane_u64(vget_lane_u64(__s2_8, __p3_8), __s0_8, __p1_8); \ + __ret_8; \ +}) +#else +#define vcopyq_lane_u64(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \ + uint64x2_t __s0_9 = __p0_9; \ + uint64x1_t __s2_9 = __p2_9; \ + uint64x2_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 1, 0); \ + uint64x2_t __ret_9; \ + __ret_9 = __noswap_vsetq_lane_u64(__noswap_vget_lane_u64(__s2_9, __p3_9), __rev0_9, __p1_9); \ + __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 1, 0); \ + __ret_9; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \ + uint16x8_t __s0_10 = __p0_10; \ + uint16x4_t __s2_10 = __p2_10; \ + uint16x8_t __ret_10; \ + __ret_10 = vsetq_lane_u16(vget_lane_u16(__s2_10, __p3_10), __s0_10, __p1_10); \ + __ret_10; \ +}) +#else +#define vcopyq_lane_u16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \ + uint16x8_t __s0_11 = __p0_11; \ + uint16x4_t __s2_11 = __p2_11; \ + uint16x8_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_11; __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 3, 2, 1, 0); \ + uint16x8_t __ret_11; \ + __ret_11 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_11, __p3_11), __rev0_11, __p1_11); \ + __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_11; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \ + int8x16_t __s0_12 = __p0_12; \ + int8x8_t __s2_12 = __p2_12; \ + int8x16_t __ret_12; \ + __ret_12 = vsetq_lane_s8(vget_lane_s8(__s2_12, __p3_12), __s0_12, __p1_12); \ + __ret_12; \ +}) +#else +#define vcopyq_lane_s8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \ + int8x16_t __s0_13 = __p0_13; \ + int8x8_t __s2_13 = __p2_13; \ + int8x16_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_13; __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_13; \ + __ret_13 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_13, __p3_13), __rev0_13, __p1_13); \ + __ret_13 = 
__builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_13; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_f32(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \ + float32x4_t __s0_14 = __p0_14; \ + float32x2_t __s2_14 = __p2_14; \ + float32x4_t __ret_14; \ + __ret_14 = vsetq_lane_f32(vget_lane_f32(__s2_14, __p3_14), __s0_14, __p1_14); \ + __ret_14; \ +}) +#else +#define vcopyq_lane_f32(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \ + float32x4_t __s0_15 = __p0_15; \ + float32x2_t __s2_15 = __p2_15; \ + float32x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ + float32x2_t __rev2_15; __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 1, 0); \ + float32x4_t __ret_15; \ + __ret_15 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_15, __p3_15), __rev0_15, __p1_15); \ + __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 3, 2, 1, 0); \ + __ret_15; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s32(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \ + int32x4_t __s0_16 = __p0_16; \ + int32x2_t __s2_16 = __p2_16; \ + int32x4_t __ret_16; \ + __ret_16 = vsetq_lane_s32(vget_lane_s32(__s2_16, __p3_16), __s0_16, __p1_16); \ + __ret_16; \ +}) +#else +#define vcopyq_lane_s32(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \ + int32x4_t __s0_17 = __p0_17; \ + int32x2_t __s2_17 = __p2_17; \ + int32x4_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 3, 2, 1, 0); \ + int32x2_t __rev2_17; __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 1, 0); \ + int32x4_t __ret_17; \ + __ret_17 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_17, __p3_17), __rev0_17, __p1_17); \ + __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 3, 2, 1, 0); \ + __ret_17; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s64(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \ + int64x2_t __s0_18 = __p0_18; \ + int64x1_t __s2_18 = __p2_18; \ + int64x2_t __ret_18; \ + __ret_18 = vsetq_lane_s64(vget_lane_s64(__s2_18, __p3_18), __s0_18, __p1_18); \ + __ret_18; \ +}) +#else +#define vcopyq_lane_s64(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \ + int64x2_t __s0_19 = __p0_19; \ + int64x1_t __s2_19 = __p2_19; \ + int64x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \ + int64x2_t __ret_19; \ + __ret_19 = __noswap_vsetq_lane_s64(__noswap_vget_lane_s64(__s2_19, __p3_19), __rev0_19, __p1_19); \ + __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 1, 0); \ + __ret_19; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s16(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \ + int16x8_t __s0_20 = __p0_20; \ + int16x4_t __s2_20 = __p2_20; \ + int16x8_t __ret_20; \ + __ret_20 = vsetq_lane_s16(vget_lane_s16(__s2_20, __p3_20), __s0_20, __p1_20); \ + __ret_20; \ +}) +#else +#define vcopyq_lane_s16(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \ + int16x8_t __s0_21 = __p0_21; \ + int16x4_t __s2_21 = __p2_21; \ + int16x8_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_21; __rev2_21 = __builtin_shufflevector(__s2_21, __s2_21, 3, 2, 1, 0); \ + int16x8_t __ret_21; \ + __ret_21 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_21, __p3_21), __rev0_21, __p1_21); \ + __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_21; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p8(__p0_22, 
__p1_22, __p2_22, __p3_22) __extension__ ({ \ + poly8x8_t __s0_22 = __p0_22; \ + poly8x8_t __s2_22 = __p2_22; \ + poly8x8_t __ret_22; \ + __ret_22 = vset_lane_p8(vget_lane_p8(__s2_22, __p3_22), __s0_22, __p1_22); \ + __ret_22; \ +}) +#else +#define vcopy_lane_p8(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \ + poly8x8_t __s0_23 = __p0_23; \ + poly8x8_t __s2_23 = __p2_23; \ + poly8x8_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_23; __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret_23; \ + __ret_23 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_23, __p3_23), __rev0_23, __p1_23); \ + __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_23; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p16(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \ + poly16x4_t __s0_24 = __p0_24; \ + poly16x4_t __s2_24 = __p2_24; \ + poly16x4_t __ret_24; \ + __ret_24 = vset_lane_p16(vget_lane_p16(__s2_24, __p3_24), __s0_24, __p1_24); \ + __ret_24; \ +}) +#else +#define vcopy_lane_p16(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \ + poly16x4_t __s0_25 = __p0_25; \ + poly16x4_t __s2_25 = __p2_25; \ + poly16x4_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \ + poly16x4_t __rev2_25; __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 3, 2, 1, 0); \ + poly16x4_t __ret_25; \ + __ret_25 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_25, __p3_25), __rev0_25, __p1_25); \ + __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 3, 2, 1, 0); \ + __ret_25; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u8(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \ + uint8x8_t __s0_26 = __p0_26; \ + uint8x8_t __s2_26 = __p2_26; \ + uint8x8_t __ret_26; \ + __ret_26 = vset_lane_u8(vget_lane_u8(__s2_26, __p3_26), __s0_26, __p1_26); \ + __ret_26; \ +}) +#else +#define vcopy_lane_u8(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \ + uint8x8_t __s0_27 = __p0_27; \ + uint8x8_t __s2_27 = __p2_27; \ + uint8x8_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_27; __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret_27; \ + __ret_27 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_27, __p3_27), __rev0_27, __p1_27); \ + __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_27; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \ + uint32x2_t __s0_28 = __p0_28; \ + uint32x2_t __s2_28 = __p2_28; \ + uint32x2_t __ret_28; \ + __ret_28 = vset_lane_u32(vget_lane_u32(__s2_28, __p3_28), __s0_28, __p1_28); \ + __ret_28; \ +}) +#else +#define vcopy_lane_u32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \ + uint32x2_t __s0_29 = __p0_29; \ + uint32x2_t __s2_29 = __p2_29; \ + uint32x2_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \ + uint32x2_t __rev2_29; __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \ + uint32x2_t __ret_29; \ + __ret_29 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_29, __p3_29), __rev0_29, __p1_29); \ + __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \ + __ret_29; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \ + uint64x1_t __s0_30 
= __p0_30; \ + uint64x1_t __s2_30 = __p2_30; \ + uint64x1_t __ret_30; \ + __ret_30 = vset_lane_u64(vget_lane_u64(__s2_30, __p3_30), __s0_30, __p1_30); \ + __ret_30; \ +}) +#else +#define vcopy_lane_u64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \ + uint64x1_t __s0_31 = __p0_31; \ + uint64x1_t __s2_31 = __p2_31; \ + uint64x1_t __ret_31; \ + __ret_31 = __noswap_vset_lane_u64(__noswap_vget_lane_u64(__s2_31, __p3_31), __s0_31, __p1_31); \ + __ret_31; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \ + uint16x4_t __s0_32 = __p0_32; \ + uint16x4_t __s2_32 = __p2_32; \ + uint16x4_t __ret_32; \ + __ret_32 = vset_lane_u16(vget_lane_u16(__s2_32, __p3_32), __s0_32, __p1_32); \ + __ret_32; \ +}) +#else +#define vcopy_lane_u16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \ + uint16x4_t __s0_33 = __p0_33; \ + uint16x4_t __s2_33 = __p2_33; \ + uint16x4_t __rev0_33; __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 3, 2, 1, 0); \ + uint16x4_t __rev2_33; __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \ + uint16x4_t __ret_33; \ + __ret_33 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_33, __p3_33), __rev0_33, __p1_33); \ + __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 3, 2, 1, 0); \ + __ret_33; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \ + int8x8_t __s0_34 = __p0_34; \ + int8x8_t __s2_34 = __p2_34; \ + int8x8_t __ret_34; \ + __ret_34 = vset_lane_s8(vget_lane_s8(__s2_34, __p3_34), __s0_34, __p1_34); \ + __ret_34; \ +}) +#else +#define vcopy_lane_s8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \ + int8x8_t __s0_35 = __p0_35; \ + int8x8_t __s2_35 = __p2_35; \ + int8x8_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_35; __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret_35; \ + __ret_35 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_35, __p3_35), __rev0_35, __p1_35); \ + __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_35; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_f32(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \ + float32x2_t __s0_36 = __p0_36; \ + float32x2_t __s2_36 = __p2_36; \ + float32x2_t __ret_36; \ + __ret_36 = vset_lane_f32(vget_lane_f32(__s2_36, __p3_36), __s0_36, __p1_36); \ + __ret_36; \ +}) +#else +#define vcopy_lane_f32(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \ + float32x2_t __s0_37 = __p0_37; \ + float32x2_t __s2_37 = __p2_37; \ + float32x2_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 1, 0); \ + float32x2_t __rev2_37; __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 1, 0); \ + float32x2_t __ret_37; \ + __ret_37 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_37, __p3_37), __rev0_37, __p1_37); \ + __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 1, 0); \ + __ret_37; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s32(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \ + int32x2_t __s0_38 = __p0_38; \ + int32x2_t __s2_38 = __p2_38; \ + int32x2_t __ret_38; \ + __ret_38 = vset_lane_s32(vget_lane_s32(__s2_38, __p3_38), __s0_38, __p1_38); \ + __ret_38; \ +}) +#else +#define vcopy_lane_s32(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \ + int32x2_t __s0_39 = __p0_39; \ + int32x2_t __s2_39 = __p2_39; \ + int32x2_t __rev0_39; __rev0_39 
= __builtin_shufflevector(__s0_39, __s0_39, 1, 0); \ + int32x2_t __rev2_39; __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 1, 0); \ + int32x2_t __ret_39; \ + __ret_39 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_39, __p3_39), __rev0_39, __p1_39); \ + __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 1, 0); \ + __ret_39; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s64(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \ + int64x1_t __s0_40 = __p0_40; \ + int64x1_t __s2_40 = __p2_40; \ + int64x1_t __ret_40; \ + __ret_40 = vset_lane_s64(vget_lane_s64(__s2_40, __p3_40), __s0_40, __p1_40); \ + __ret_40; \ +}) +#else +#define vcopy_lane_s64(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \ + int64x1_t __s0_41 = __p0_41; \ + int64x1_t __s2_41 = __p2_41; \ + int64x1_t __ret_41; \ + __ret_41 = __noswap_vset_lane_s64(__noswap_vget_lane_s64(__s2_41, __p3_41), __s0_41, __p1_41); \ + __ret_41; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s16(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \ + int16x4_t __s0_42 = __p0_42; \ + int16x4_t __s2_42 = __p2_42; \ + int16x4_t __ret_42; \ + __ret_42 = vset_lane_s16(vget_lane_s16(__s2_42, __p3_42), __s0_42, __p1_42); \ + __ret_42; \ +}) +#else +#define vcopy_lane_s16(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \ + int16x4_t __s0_43 = __p0_43; \ + int16x4_t __s2_43 = __p2_43; \ + int16x4_t __rev0_43; __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \ + int16x4_t __rev2_43; __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 3, 2, 1, 0); \ + int16x4_t __ret_43; \ + __ret_43 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_43, __p3_43), __rev0_43, __p1_43); \ + __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \ + __ret_43; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p8(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \ + poly8x16_t __s0_44 = __p0_44; \ + poly8x16_t __s2_44 = __p2_44; \ + poly8x16_t __ret_44; \ + __ret_44 = vsetq_lane_p8(vgetq_lane_p8(__s2_44, __p3_44), __s0_44, __p1_44); \ + __ret_44; \ +}) +#else +#define vcopyq_laneq_p8(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \ + poly8x16_t __s0_45 = __p0_45; \ + poly8x16_t __s2_45 = __p2_45; \ + poly8x16_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret_45; \ + __ret_45 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_45, __p3_45), __rev0_45, __p1_45); \ + __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_45; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p16(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ + poly16x8_t __s0_46 = __p0_46; \ + poly16x8_t __s2_46 = __p2_46; \ + poly16x8_t __ret_46; \ + __ret_46 = vsetq_lane_p16(vgetq_lane_p16(__s2_46, __p3_46), __s0_46, __p1_46); \ + __ret_46; \ +}) +#else +#define vcopyq_laneq_p16(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ + poly16x8_t __s0_47 = __p0_47; \ + poly16x8_t __s2_47 = __p2_47; \ + poly16x8_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret_47; \ + __ret_47 = 
__noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_47, __p3_47), __rev0_47, __p1_47); \ + __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_47; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u8(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ + uint8x16_t __s0_48 = __p0_48; \ + uint8x16_t __s2_48 = __p2_48; \ + uint8x16_t __ret_48; \ + __ret_48 = vsetq_lane_u8(vgetq_lane_u8(__s2_48, __p3_48), __s0_48, __p1_48); \ + __ret_48; \ +}) +#else +#define vcopyq_laneq_u8(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ + uint8x16_t __s0_49 = __p0_49; \ + uint8x16_t __s2_49 = __p2_49; \ + uint8x16_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_49; \ + __ret_49 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_49, __p3_49), __rev0_49, __p1_49); \ + __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_49; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ + uint32x4_t __s0_50 = __p0_50; \ + uint32x4_t __s2_50 = __p2_50; \ + uint32x4_t __ret_50; \ + __ret_50 = vsetq_lane_u32(vgetq_lane_u32(__s2_50, __p3_50), __s0_50, __p1_50); \ + __ret_50; \ +}) +#else +#define vcopyq_laneq_u32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \ + uint32x4_t __s0_51 = __p0_51; \ + uint32x4_t __s2_51 = __p2_51; \ + uint32x4_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \ + uint32x4_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \ + uint32x4_t __ret_51; \ + __ret_51 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_51, __p3_51), __rev0_51, __p1_51); \ + __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \ + __ret_51; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u64(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ + uint64x2_t __s0_52 = __p0_52; \ + uint64x2_t __s2_52 = __p2_52; \ + uint64x2_t __ret_52; \ + __ret_52 = vsetq_lane_u64(vgetq_lane_u64(__s2_52, __p3_52), __s0_52, __p1_52); \ + __ret_52; \ +}) +#else +#define vcopyq_laneq_u64(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ + uint64x2_t __s0_53 = __p0_53; \ + uint64x2_t __s2_53 = __p2_53; \ + uint64x2_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \ + uint64x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ + uint64x2_t __ret_53; \ + __ret_53 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_53, __p3_53), __rev0_53, __p1_53); \ + __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \ + __ret_53; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ + uint16x8_t __s0_54 = __p0_54; \ + uint16x8_t __s2_54 = __p2_54; \ + uint16x8_t __ret_54; \ + __ret_54 = vsetq_lane_u16(vgetq_lane_u16(__s2_54, __p3_54), __s0_54, __p1_54); \ + __ret_54; \ +}) +#else +#define vcopyq_laneq_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ + uint16x8_t __s0_55 = __p0_55; \ + uint16x8_t __s2_55 = __p2_55; \ + uint16x8_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_55; __rev2_55 = 
__builtin_shufflevector(__s2_55, __s2_55, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret_55; \ + __ret_55 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_55, __p3_55), __rev0_55, __p1_55); \ + __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_55; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s8(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ + int8x16_t __s0_56 = __p0_56; \ + int8x16_t __s2_56 = __p2_56; \ + int8x16_t __ret_56; \ + __ret_56 = vsetq_lane_s8(vgetq_lane_s8(__s2_56, __p3_56), __s0_56, __p1_56); \ + __ret_56; \ +}) +#else +#define vcopyq_laneq_s8(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ + int8x16_t __s0_57 = __p0_57; \ + int8x16_t __s2_57 = __p2_57; \ + int8x16_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_57; \ + __ret_57 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_57, __p3_57), __rev0_57, __p1_57); \ + __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_57; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_f32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ + float32x4_t __s0_58 = __p0_58; \ + float32x4_t __s2_58 = __p2_58; \ + float32x4_t __ret_58; \ + __ret_58 = vsetq_lane_f32(vgetq_lane_f32(__s2_58, __p3_58), __s0_58, __p1_58); \ + __ret_58; \ +}) +#else +#define vcopyq_laneq_f32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ + float32x4_t __s0_59 = __p0_59; \ + float32x4_t __s2_59 = __p2_59; \ + float32x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \ + float32x4_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \ + float32x4_t __ret_59; \ + __ret_59 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_59, __p3_59), __rev0_59, __p1_59); \ + __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \ + __ret_59; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ + int32x4_t __s0_60 = __p0_60; \ + int32x4_t __s2_60 = __p2_60; \ + int32x4_t __ret_60; \ + __ret_60 = vsetq_lane_s32(vgetq_lane_s32(__s2_60, __p3_60), __s0_60, __p1_60); \ + __ret_60; \ +}) +#else +#define vcopyq_laneq_s32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ + int32x4_t __s0_61 = __p0_61; \ + int32x4_t __s2_61 = __p2_61; \ + int32x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \ + int32x4_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \ + int32x4_t __ret_61; \ + __ret_61 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_61, __p3_61), __rev0_61, __p1_61); \ + __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \ + __ret_61; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s64(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ + int64x2_t __s0_62 = __p0_62; \ + int64x2_t __s2_62 = __p2_62; \ + int64x2_t __ret_62; \ + __ret_62 = vsetq_lane_s64(vgetq_lane_s64(__s2_62, __p3_62), __s0_62, __p1_62); \ + __ret_62; \ +}) +#else +#define vcopyq_laneq_s64(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ + int64x2_t __s0_63 = __p0_63; \ + int64x2_t __s2_63 = __p2_63; \ + int64x2_t __rev0_63; __rev0_63 = 
__builtin_shufflevector(__s0_63, __s0_63, 1, 0); \ + int64x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \ + int64x2_t __ret_63; \ + __ret_63 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_63, __p3_63), __rev0_63, __p1_63); \ + __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \ + __ret_63; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ + int16x8_t __s0_64 = __p0_64; \ + int16x8_t __s2_64 = __p2_64; \ + int16x8_t __ret_64; \ + __ret_64 = vsetq_lane_s16(vgetq_lane_s16(__s2_64, __p3_64), __s0_64, __p1_64); \ + __ret_64; \ +}) +#else +#define vcopyq_laneq_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ + int16x8_t __s0_65 = __p0_65; \ + int16x8_t __s2_65 = __p2_65; \ + int16x8_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_65; \ + __ret_65 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_65, __p3_65), __rev0_65, __p1_65); \ + __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_65; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p8(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ + poly8x8_t __s0_66 = __p0_66; \ + poly8x16_t __s2_66 = __p2_66; \ + poly8x8_t __ret_66; \ + __ret_66 = vset_lane_p8(vgetq_lane_p8(__s2_66, __p3_66), __s0_66, __p1_66); \ + __ret_66; \ +}) +#else +#define vcopy_laneq_p8(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ + poly8x8_t __s0_67 = __p0_67; \ + poly8x16_t __s2_67 = __p2_67; \ + poly8x8_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret_67; \ + __ret_67 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_67, __p3_67), __rev0_67, __p1_67); \ + __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_67; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ + poly16x4_t __s0_68 = __p0_68; \ + poly16x8_t __s2_68 = __p2_68; \ + poly16x4_t __ret_68; \ + __ret_68 = vset_lane_p16(vgetq_lane_p16(__s2_68, __p3_68), __s0_68, __p1_68); \ + __ret_68; \ +}) +#else +#define vcopy_laneq_p16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ + poly16x4_t __s0_69 = __p0_69; \ + poly16x8_t __s2_69 = __p2_69; \ + poly16x4_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \ + poly16x8_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __ret_69; \ + __ret_69 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_69, __p3_69), __rev0_69, __p1_69); \ + __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \ + __ret_69; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u8(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ + uint8x8_t __s0_70 = __p0_70; \ + uint8x16_t __s2_70 = __p2_70; \ + uint8x8_t __ret_70; \ + __ret_70 = vset_lane_u8(vgetq_lane_u8(__s2_70, __p3_70), __s0_70, __p1_70); \ + __ret_70; \ +}) +#else +#define vcopy_laneq_u8(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ + uint8x8_t __s0_71 = __p0_71; \ + uint8x16_t __s2_71 = __p2_71; \ + uint8x8_t __rev0_71; __rev0_71 = 
__builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret_71; \ + __ret_71 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_71, __p3_71), __rev0_71, __p1_71); \ + __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_71; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ + uint32x2_t __s0_72 = __p0_72; \ + uint32x4_t __s2_72 = __p2_72; \ + uint32x2_t __ret_72; \ + __ret_72 = vset_lane_u32(vgetq_lane_u32(__s2_72, __p3_72), __s0_72, __p1_72); \ + __ret_72; \ +}) +#else +#define vcopy_laneq_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ + uint32x2_t __s0_73 = __p0_73; \ + uint32x4_t __s2_73 = __p2_73; \ + uint32x2_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \ + uint32x4_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 3, 2, 1, 0); \ + uint32x2_t __ret_73; \ + __ret_73 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_73, __p3_73), __rev0_73, __p1_73); \ + __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \ + __ret_73; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u64(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ + uint64x1_t __s0_74 = __p0_74; \ + uint64x2_t __s2_74 = __p2_74; \ + uint64x1_t __ret_74; \ + __ret_74 = vset_lane_u64(vgetq_lane_u64(__s2_74, __p3_74), __s0_74, __p1_74); \ + __ret_74; \ +}) +#else +#define vcopy_laneq_u64(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ + uint64x1_t __s0_75 = __p0_75; \ + uint64x2_t __s2_75 = __p2_75; \ + uint64x2_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 1, 0); \ + uint64x1_t __ret_75; \ + __ret_75 = __noswap_vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_75, __p3_75), __s0_75, __p1_75); \ + __ret_75; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u16(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ + uint16x4_t __s0_76 = __p0_76; \ + uint16x8_t __s2_76 = __p2_76; \ + uint16x4_t __ret_76; \ + __ret_76 = vset_lane_u16(vgetq_lane_u16(__s2_76, __p3_76), __s0_76, __p1_76); \ + __ret_76; \ +}) +#else +#define vcopy_laneq_u16(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ + uint16x4_t __s0_77 = __p0_77; \ + uint16x8_t __s2_77 = __p2_77; \ + uint16x4_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 3, 2, 1, 0); \ + uint16x8_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret_77; \ + __ret_77 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_77, __p3_77), __rev0_77, __p1_77); \ + __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 3, 2, 1, 0); \ + __ret_77; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s8(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ + int8x8_t __s0_78 = __p0_78; \ + int8x16_t __s2_78 = __p2_78; \ + int8x8_t __ret_78; \ + __ret_78 = vset_lane_s8(vgetq_lane_s8(__s2_78, __p3_78), __s0_78, __p1_78); \ + __ret_78; \ +}) +#else +#define vcopy_laneq_s8(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ + int8x8_t __s0_79 = __p0_79; \ + int8x16_t __s2_79 = __p2_79; \ + int8x8_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ 
+ int8x8_t __ret_79; \ + __ret_79 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_79, __p3_79), __rev0_79, __p1_79); \ + __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_79; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ + float32x2_t __s0_80 = __p0_80; \ + float32x4_t __s2_80 = __p2_80; \ + float32x2_t __ret_80; \ + __ret_80 = vset_lane_f32(vgetq_lane_f32(__s2_80, __p3_80), __s0_80, __p1_80); \ + __ret_80; \ +}) +#else +#define vcopy_laneq_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ + float32x2_t __s0_81 = __p0_81; \ + float32x4_t __s2_81 = __p2_81; \ + float32x2_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \ + float32x4_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \ + float32x2_t __ret_81; \ + __ret_81 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_81, __p3_81), __rev0_81, __p1_81); \ + __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \ + __ret_81; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ + int32x2_t __s0_82 = __p0_82; \ + int32x4_t __s2_82 = __p2_82; \ + int32x2_t __ret_82; \ + __ret_82 = vset_lane_s32(vgetq_lane_s32(__s2_82, __p3_82), __s0_82, __p1_82); \ + __ret_82; \ +}) +#else +#define vcopy_laneq_s32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \ + int32x2_t __s0_83 = __p0_83; \ + int32x4_t __s2_83 = __p2_83; \ + int32x2_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \ + int32x4_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \ + int32x2_t __ret_83; \ + __ret_83 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_83, __p3_83), __rev0_83, __p1_83); \ + __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \ + __ret_83; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s64(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ + int64x1_t __s0_84 = __p0_84; \ + int64x2_t __s2_84 = __p2_84; \ + int64x1_t __ret_84; \ + __ret_84 = vset_lane_s64(vgetq_lane_s64(__s2_84, __p3_84), __s0_84, __p1_84); \ + __ret_84; \ +}) +#else +#define vcopy_laneq_s64(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ + int64x1_t __s0_85 = __p0_85; \ + int64x2_t __s2_85 = __p2_85; \ + int64x2_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \ + int64x1_t __ret_85; \ + __ret_85 = __noswap_vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_85, __p3_85), __s0_85, __p1_85); \ + __ret_85; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \ + int16x4_t __s0_86 = __p0_86; \ + int16x8_t __s2_86 = __p2_86; \ + int16x4_t __ret_86; \ + __ret_86 = vset_lane_s16(vgetq_lane_s16(__s2_86, __p3_86), __s0_86, __p1_86); \ + __ret_86; \ +}) +#else +#define vcopy_laneq_s16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \ + int16x4_t __s0_87 = __p0_87; \ + int16x8_t __s2_87 = __p2_87; \ + int16x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ + int16x8_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret_87; \ + __ret_87 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_87, __p3_87), __rev0_87, __p1_87); \ + __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ + __ret_87; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
poly64x1_t vcreate_p64(uint64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#else +__ai poly64x1_t vcreate_p64(uint64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vcreate_f64(uint64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#else +__ai float64x1_t vcreate_f64(uint64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vcvts_f32_s32(int32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); + return __ret; +} +#else +__ai float32_t vcvts_f32_s32(int32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vcvts_f32_u32(uint32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); + return __ret; +} +#else +__ai float32_t vcvts_f32_u32(uint32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vcvtd_f64_s64(int64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); + return __ret; +} +#else +__ai float64_t vcvtd_f64_s64(int64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vcvtd_f64_u64(uint64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); + return __ret; +} +#else +__ai float64_t vcvtd_f64_u64(uint64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t 
vcvt_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { + float16x8_t __ret; + __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1)); + return __ret; +} +#else +__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x8_t __ret; + __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_f16(vget_high_f16(__p0)); + return __ret; +} +#else +__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1)); + return __ret; +} +#else +__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x4_t __ret; + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = vcvt_f64_f32(vget_high_f32(__p0)); + return __ret; +} +#else +__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float64x2_t __ret; + __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ + __ret; \ +}) +#else +#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ + __ret; \ +}) +#else +#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ + __ret; \ +}) +#else +#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define 
vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ + float32_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ + __ret; \ +}) +#else +#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ + float32_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ + float64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ + __ret; \ +}) +#else +#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ + float64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ + float32_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ + __ret; \ +}) +#else +#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ + float32_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ + 
float64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ + __ret; \ +}) +#else +#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ + float64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vcvts_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); + return __ret; +} +#else +__ai int32_t vcvts_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcvtd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); + return __ret; +} +#else +__ai int64_t vcvtd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcvts_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); + return __ret; +} +#else +__ai uint32_t vcvts_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcvtd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); + return __ret; +} +#else +__ai uint64_t vcvtd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vcvtas_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); + return __ret; +} +#else +__ai int32_t vcvtas_s32_f32(float32_t __p0) { + int32_t 
__ret; + __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcvtad_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); + return __ret; +} +#else +__ai int64_t vcvtad_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcvtas_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); + return __ret; +} +#else +__ai uint32_t vcvtas_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcvtad_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); + return __ret; +} +#else +__ai uint64_t vcvtad_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vcvtms_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); + return __ret; +} +#else +__ai int32_t vcvtms_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcvtmd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); + return __ret; +} +#else +__ai int64_t vcvtmd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcvtms_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); + return __ret; +} +#else +__ai uint32_t vcvtms_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcvtmd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); + return __ret; +} +#else +__ai uint64_t vcvtmd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vcvtns_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); + return __ret; +} +#else +__ai int32_t vcvtns_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcvtnd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); + return __ret; +} +#else +__ai int64_t vcvtnd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcvtns_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); + return __ret; +} +#else +__ai uint32_t vcvtns_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcvtnd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); + return __ret; +} +#else +__ai uint64_t 
vcvtnd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vcvtps_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); + return __ret; +} +#else +__ai int32_t vcvtps_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vcvtpd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); + return __ret; +} +#else +__ai int64_t vcvtpd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vcvtps_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); + return __ret; +} +#else +__ai uint32_t vcvtps_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vcvtpd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); + return __ret; +} +#else +__ai uint64_t vcvtpd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vcvtxd_f32_f64(float64_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); + return __ret; +} +#else +__ai float32_t vcvtxd_f32_f64(float64_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); + return __ret; +} +#else +__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x4_t __ret; + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) 
__builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 
2, 1, 0); \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8_t __ret; \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8_t __ret; \ + __ret = (poly8_t) 
__builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16_t __ret; \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_f64(__p0, __p1) 
__extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x1_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \ + 
poly16x8_t __s0 = __p0; \ + poly16x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define 
vdupq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \ + 
float16x8_t __s0 = __p0; \ + float16x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x1_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x8_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x1_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ + __ret; \ 
+}) +#else +#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x1_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x1_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __ret; \ + __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ + __ret; \ +}) +#else +#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vdup_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#else +__ai poly64x1_t vdup_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vdup_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#else +__ai float64x1_t vdup_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#else +#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t 
__rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#else +#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#else +__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +__ai float64x1_t __noswap_vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64_t __ret; \ + __ret = 
(float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#define __noswap_vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + __ret; \ +}) +#else +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ + __ret; \ +}) +#else +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + 
float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ + __ret; \ +}) +#else +#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ + __ret; \ +}) +#define __noswap_vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ 
+ float64x2_t __s2 = __p2; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32_t __ret; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t 
__ret; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#else +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
3, 2, 1, 0); + float32x4_t __ret; + __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, __p2); + return __ret; +} +#else +__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __noswap_vfma_f64(__p0, -__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsd_lane_f64(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \ + float64_t __s0_88 = __p0_88; \ + float64_t __s1_88 = __p1_88; \ + float64x1_t __s2_88 = __p2_88; \ + float64_t __ret_88; \ + __ret_88 = vfmad_lane_f64(__s0_88, -__s1_88, __s2_88, __p3_88); \ + __ret_88; \ +}) +#else +#define vfmsd_lane_f64(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \ + float64_t __s0_89 = __p0_89; \ + float64_t __s1_89 = __p1_89; \ + float64x1_t __s2_89 = __p2_89; \ + float64_t __ret_89; \ + __ret_89 = __noswap_vfmad_lane_f64(__s0_89, -__s1_89, __s2_89, __p3_89); \ + __ret_89; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmss_lane_f32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \ + float32_t __s0_90 = __p0_90; \ + float32_t __s1_90 = __p1_90; \ + float32x2_t __s2_90 = __p2_90; \ + float32_t __ret_90; \ + __ret_90 = vfmas_lane_f32(__s0_90, -__s1_90, __s2_90, __p3_90); \ + __ret_90; \ +}) +#else +#define vfmss_lane_f32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \ + float32_t __s0_91 = __p0_91; \ + float32_t __s1_91 = __p1_91; \ + float32x2_t __s2_91 = __p2_91; \ + float32x2_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 1, 0); \ + float32_t __ret_91; \ + __ret_91 = __noswap_vfmas_lane_f32(__s0_91, -__s1_91, __rev2_91, __p3_91); \ + __ret_91; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f64(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \ + float64x2_t __s0_92 = __p0_92; \ + float64x2_t __s1_92 = __p1_92; \ + float64x1_t __s2_92 = __p2_92; \ + float64x2_t __ret_92; \ + __ret_92 = vfmaq_lane_f64(__s0_92, -__s1_92, __s2_92, __p3_92); \ + __ret_92; \ +}) 
+#else +#define vfmsq_lane_f64(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \ + float64x2_t __s0_93 = __p0_93; \ + float64x2_t __s1_93 = __p1_93; \ + float64x1_t __s2_93 = __p2_93; \ + float64x2_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \ + float64x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \ + float64x2_t __ret_93; \ + __ret_93 = __noswap_vfmaq_lane_f64(__rev0_93, -__rev1_93, __s2_93, __p3_93); \ + __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \ + __ret_93; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f32(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \ + float32x4_t __s0_94 = __p0_94; \ + float32x4_t __s1_94 = __p1_94; \ + float32x2_t __s2_94 = __p2_94; \ + float32x4_t __ret_94; \ + __ret_94 = vfmaq_lane_f32(__s0_94, -__s1_94, __s2_94, __p3_94); \ + __ret_94; \ +}) +#else +#define vfmsq_lane_f32(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \ + float32x4_t __s0_95 = __p0_95; \ + float32x4_t __s1_95 = __p1_95; \ + float32x2_t __s2_95 = __p2_95; \ + float32x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \ + float32x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \ + float32x2_t __rev2_95; __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 1, 0); \ + float32x4_t __ret_95; \ + __ret_95 = __noswap_vfmaq_lane_f32(__rev0_95, -__rev1_95, __rev2_95, __p3_95); \ + __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \ + __ret_95; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f64(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \ + float64x1_t __s0_96 = __p0_96; \ + float64x1_t __s1_96 = __p1_96; \ + float64x1_t __s2_96 = __p2_96; \ + float64x1_t __ret_96; \ + __ret_96 = vfma_lane_f64(__s0_96, -__s1_96, __s2_96, __p3_96); \ + __ret_96; \ +}) +#else +#define vfms_lane_f64(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \ + float64x1_t __s0_97 = __p0_97; \ + float64x1_t __s1_97 = __p1_97; \ + float64x1_t __s2_97 = __p2_97; \ + float64x1_t __ret_97; \ + __ret_97 = __noswap_vfma_lane_f64(__s0_97, -__s1_97, __s2_97, __p3_97); \ + __ret_97; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f32(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \ + float32x2_t __s0_98 = __p0_98; \ + float32x2_t __s1_98 = __p1_98; \ + float32x2_t __s2_98 = __p2_98; \ + float32x2_t __ret_98; \ + __ret_98 = vfma_lane_f32(__s0_98, -__s1_98, __s2_98, __p3_98); \ + __ret_98; \ +}) +#else +#define vfms_lane_f32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \ + float32x2_t __s0_99 = __p0_99; \ + float32x2_t __s1_99 = __p1_99; \ + float32x2_t __s2_99 = __p2_99; \ + float32x2_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \ + float32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \ + float32x2_t __rev2_99; __rev2_99 = __builtin_shufflevector(__s2_99, __s2_99, 1, 0); \ + float32x2_t __ret_99; \ + __ret_99 = __noswap_vfma_lane_f32(__rev0_99, -__rev1_99, __rev2_99, __p3_99); \ + __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \ + __ret_99; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsd_laneq_f64(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \ + float64_t __s0_100 = __p0_100; \ + float64_t __s1_100 = __p1_100; \ + float64x2_t __s2_100 = __p2_100; \ + float64_t __ret_100; \ + __ret_100 = vfmad_laneq_f64(__s0_100, -__s1_100, __s2_100, __p3_100); \ + __ret_100; \ +}) +#else +#define 
vfmsd_laneq_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \ + float64_t __s0_101 = __p0_101; \ + float64_t __s1_101 = __p1_101; \ + float64x2_t __s2_101 = __p2_101; \ + float64x2_t __rev2_101; __rev2_101 = __builtin_shufflevector(__s2_101, __s2_101, 1, 0); \ + float64_t __ret_101; \ + __ret_101 = __noswap_vfmad_laneq_f64(__s0_101, -__s1_101, __rev2_101, __p3_101); \ + __ret_101; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmss_laneq_f32(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \ + float32_t __s0_102 = __p0_102; \ + float32_t __s1_102 = __p1_102; \ + float32x4_t __s2_102 = __p2_102; \ + float32_t __ret_102; \ + __ret_102 = vfmas_laneq_f32(__s0_102, -__s1_102, __s2_102, __p3_102); \ + __ret_102; \ +}) +#else +#define vfmss_laneq_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \ + float32_t __s0_103 = __p0_103; \ + float32_t __s1_103 = __p1_103; \ + float32x4_t __s2_103 = __p2_103; \ + float32x4_t __rev2_103; __rev2_103 = __builtin_shufflevector(__s2_103, __s2_103, 3, 2, 1, 0); \ + float32_t __ret_103; \ + __ret_103 = __noswap_vfmas_laneq_f32(__s0_103, -__s1_103, __rev2_103, __p3_103); \ + __ret_103; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f64(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \ + float64x2_t __s0_104 = __p0_104; \ + float64x2_t __s1_104 = __p1_104; \ + float64x2_t __s2_104 = __p2_104; \ + float64x2_t __ret_104; \ + __ret_104 = vfmaq_laneq_f64(__s0_104, -__s1_104, __s2_104, __p3_104); \ + __ret_104; \ +}) +#else +#define vfmsq_laneq_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \ + float64x2_t __s0_105 = __p0_105; \ + float64x2_t __s1_105 = __p1_105; \ + float64x2_t __s2_105 = __p2_105; \ + float64x2_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 1, 0); \ + float64x2_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 1, 0); \ + float64x2_t __rev2_105; __rev2_105 = __builtin_shufflevector(__s2_105, __s2_105, 1, 0); \ + float64x2_t __ret_105; \ + __ret_105 = __noswap_vfmaq_laneq_f64(__rev0_105, -__rev1_105, __rev2_105, __p3_105); \ + __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 1, 0); \ + __ret_105; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \ + float32x4_t __s0_106 = __p0_106; \ + float32x4_t __s1_106 = __p1_106; \ + float32x4_t __s2_106 = __p2_106; \ + float32x4_t __ret_106; \ + __ret_106 = vfmaq_laneq_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \ + __ret_106; \ +}) +#else +#define vfmsq_laneq_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \ + float32x4_t __s0_107 = __p0_107; \ + float32x4_t __s1_107 = __p1_107; \ + float32x4_t __s2_107 = __p2_107; \ + float32x4_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 3, 2, 1, 0); \ + float32x4_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 3, 2, 1, 0); \ + float32x4_t __rev2_107; __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 3, 2, 1, 0); \ + float32x4_t __ret_107; \ + __ret_107 = __noswap_vfmaq_laneq_f32(__rev0_107, -__rev1_107, __rev2_107, __p3_107); \ + __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 3, 2, 1, 0); \ + __ret_107; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \ + float64x1_t __s0_108 = __p0_108; \ + float64x1_t __s1_108 = __p1_108; \ + float64x2_t __s2_108 = __p2_108; \ + float64x1_t __ret_108; \ + __ret_108 = 
vfma_laneq_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \ + __ret_108; \ +}) +#else +#define vfms_laneq_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \ + float64x1_t __s0_109 = __p0_109; \ + float64x1_t __s1_109 = __p1_109; \ + float64x2_t __s2_109 = __p2_109; \ + float64x2_t __rev2_109; __rev2_109 = __builtin_shufflevector(__s2_109, __s2_109, 1, 0); \ + float64x1_t __ret_109; \ + __ret_109 = __noswap_vfma_laneq_f64(__s0_109, -__s1_109, __rev2_109, __p3_109); \ + __ret_109; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \ + float32x2_t __s0_110 = __p0_110; \ + float32x2_t __s1_110 = __p1_110; \ + float32x4_t __s2_110 = __p2_110; \ + float32x2_t __ret_110; \ + __ret_110 = vfma_laneq_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \ + __ret_110; \ +}) +#else +#define vfms_laneq_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \ + float32x2_t __s0_111 = __p0_111; \ + float32x2_t __s1_111 = __p1_111; \ + float32x4_t __s2_111 = __p2_111; \ + float32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ + float32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ + float32x4_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 3, 2, 1, 0); \ + float32x2_t __ret_111; \ + __ret_111 = __noswap_vfma_laneq_f32(__rev0_111, -__rev1_111, __rev2_111, __p3_111); \ + __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ + __ret_111; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { + 
poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vget_high_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai float64x1_t vget_high_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64_t __ret; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64_t __ret; \ + __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \ + 
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vget_low_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai float64x1_t vget_low_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x1_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ + __ret; \ +}) +#else +#define vld1_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ + __ret; \ +}) +#else +#define vld1_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ + __ret; \ +}) +#else +#define vld1_dup_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret; \ +}) +#else +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret; \ +}) +#else +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ + __ret; \ +}) +#else +#define 
vld1_dup_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#else +#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#else +#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x2(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x2(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p64_x2(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld1_p64_x2(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x2(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x2(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + 
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x2(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x2(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x2(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x2(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x2(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x2(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x2(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32_x2(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x2(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x2(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x2(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x2(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 
7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x2(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x2(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x2(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x2(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x2(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x2(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x2(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x2(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x2(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x2(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x2(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16_x2(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +#define vld1_u8_x2(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x2(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x2(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x2(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u64_x2(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld1_u64_x2(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x2(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x2(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x2(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x2(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f64_x2(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld1_f64_x2(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x2(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x2(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x2(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x2(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld1_s32_x2(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x2(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s64_x2(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld1_s64_x2(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x2(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x2(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x3(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x3(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p64_x3(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld1_p64_x3(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x3(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x3(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x3(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x3(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define 
vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x3(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x3(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x3(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x3(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x3(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32_x3(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x3(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x3(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x3(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x3(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x3(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ + __ret; \ 
+}) +#else +#define vld1q_s8_x3(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x3(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x3(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x3(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x3(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x3(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x3(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x3(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x3(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x3(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else 
+#define vld1q_s16_x3(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8_x3(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x3(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x3(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x3(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u64_x3(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld1_u64_x3(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x3(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x3(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x3(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x3(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f64_x3(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld1_f64_x3(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x3(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x3(__p0) 
__extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x3(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x3(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32_x3(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x3(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s64_x3(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld1_s64_x3(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x3(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x3(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x4(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x4(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p64_x4(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld1_p64_x4(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x4(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x4(__p0) __extension__ ({ \ + poly16x4x4_t 
__ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x4(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x4(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x4(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x4(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x4(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x4(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x4(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ + __ret; \ +}) 
+#else +#define vld1q_u32_x4(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x4(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x4(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x4(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x4(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x4(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x4(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x4(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x4(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 
41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x4(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x4(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x4(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x4(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x4(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x4(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x4(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16_x4(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8_x4(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x4(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x4(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x4(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u64_x4(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#else +#define vld1_u64_x4(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x4(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x4(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x4(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x4(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f64_x4(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld1_f64_x4(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x4(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x4(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x4(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ + 
__ret; \ +}) +#else +#define vld1_f16_x4(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32_x4(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x4(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s64_x4(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#else +#define vld1_s64_x4(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x4(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x4(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld2_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + 
__builtin_neon_vld2q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld2_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld2_dup_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld2q_dup_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld2q_dup_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld2q_dup_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + 
__builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld2q_dup_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld2q_dup_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld2q_dup_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld2q_dup_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld2q_dup_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld2q_dup_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + 
__builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld2q_dup_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld2q_dup_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld2q_dup_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld2_dup_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ + __ret; \ +}) +#else +#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ + __ret; \ +}) +#else +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ + __ret; \ +}) +#else +#define vld2q_lane_p64(__p0, __p1, __p2) 
__extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ + __ret; \ +}) +#else +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ + __ret; \ +}) +#else +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ + __ret; \ +}) +#else +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f64(__p0, __p1, 
__p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \ + __ret; \ +}) +#else +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \ + __ret; \ +}) +#else +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ + __ret; \ +}) +#else +#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \ + __ret; \ +}) +#else +#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \ + __ret; \ +}) +#else +#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld3_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, 
__p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld3_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld3_dup_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] 
= __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld3q_dup_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + \ + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld3_dup_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ + __ret; \ +}) +#else +#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ + __ret; \ +}) +#else +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ + __ret; \ +}) +#else +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 
0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ + __ret; \ +}) +#else +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ + __ret; \ +}) +#else +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ + __ret; \ +}) +#else +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \ + __ret; \ +}) +#else +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \ + __ret; \ +}) +#else +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ + __ret; \ +}) +#else +#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \ + __ret; \ +}) +#else +#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \ + __ret; \ +}) +#else 
+#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld4_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld4_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#else +#define vld4_dup_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + 
__builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld4q_dup_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#else +#define vld4_dup_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ + __ret; \ +}) +#else +#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + poly64x1x4_t __ret; \ + 
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ + __ret; \ +}) +#else +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ + __ret; \ +}) +#else +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ + __ret; \ +}) +#else +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ + __ret; \ +}) +#else +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ + __ret; \ +}) +#else +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 
(int8x16_t)__rev1.val[3], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \ + __ret; \ +}) +#else +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \ + __ret; \ +}) +#else +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ + __ret; \ +}) +#else +#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \ + __ret; \ +}) +#else +#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \ + __ret; \ +}) +#else +#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vldrq_p128(__p0) __extension__ ({ \ + poly128_t __ret; \ + __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ + __ret; \ +}) +#else +#define vldrq_p128(__p0) __extension__ ({ \ + poly128_t __ret; \ + __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#else +__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0); + return __ret; +} +#else +__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vmaxnmv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0); + return __ret; +} +#else +__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0); + return __ret; +} +#else +__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0); + return __ret; +} +#else +__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vmaxvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0); + return __ret; +} +#else +__ai int8_t vmaxvq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vmaxvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vmaxvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0); + return __ret; +} +#else +__ai float32_t vmaxvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vmaxvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0); + return __ret; +} +#else +__ai int32_t vmaxvq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vmaxvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0); + return __ret; +} +#else +__ai int16_t vmaxvq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0); + return __ret; +} +#else +__ai uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0); + return __ret; +} +#else +__ai uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0); + return __ret; +} +#else +__ai uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vmaxv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0); + return __ret; +} +#else +__ai int8_t vmaxv_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmaxv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vmaxv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vmaxv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0); + return __ret; +} +#else +__ai int32_t vmaxv_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vmaxv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0); + return __ret; +} +#else +__ai int16_t vmaxv_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t 
__p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#else +__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vminnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vminnmvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0); + return __ret; +} +#else +__ai float32_t vminnmvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vminnmv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vminvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0); + return __ret; +} +#else +__ai uint8_t vminvq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vminvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0); + return __ret; +} +#else +__ai uint32_t vminvq_u32(uint32x4_t __p0) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vminvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0); + return __ret; +} +#else +__ai uint16_t vminvq_u16(uint16x8_t __p0) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vminvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0); + return __ret; +} +#else +__ai int8_t vminvq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vminvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai 
float64_t vminvq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0); + return __ret; +} +#else +__ai float32_t vminvq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vminvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0); + return __ret; +} +#else +__ai int32_t vminvq_s32(int32x4_t __p0) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vminvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0); + return __ret; +} +#else +__ai int16_t vminvq_s16(int16x8_t __p0) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vminv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0); + return __ret; +} +#else +__ai uint8_t vminv_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vminv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0); + return __ret; +} +#else +__ai uint32_t vminv_u32(uint32x2_t __p0) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vminv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0); + return __ret; +} +#else +__ai uint16_t vminv_u16(uint16x4_t __p0) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vminv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0); + return __ret; +} +#else +__ai int8_t vminv_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vminv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vminv_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int32_t vminv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0); + return __ret; +} +#else +__ai int32_t vminv_s32(int32x2_t __p0) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vminv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0); + return __ret; +} +#else +__ai int16_t vminv_s16(int16x4_t __p0) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint16x8_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, 
__p3, __p3, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x2_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + 
uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x2_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint16x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32x2_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x2_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x4_t __ret; \ + __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 
= __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint64x2_t __ret; \ + __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64x2_t __ret; \ + __ret = __rev0 + 
__noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint64x2_t __ret; \ + __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = 
__p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint64x2_t __ret; \ + __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + 
int64x2_t __ret; \ + __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + float64x2_t __ret; + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint16x8_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 
3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x2_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x2_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint16x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + float32x2_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x2_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __rev2; __rev2 = 
__builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x4_t __ret; \ + __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ + __ret; \ +}) +#else +#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint64x2_t __ret; \ + __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), 
__builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64x2_t __ret; \ + __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint64x2_t __ret; \ + __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint64x2_t __ret; \ + __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 - vmull_u16(__s1, 
__builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x1_t vmov_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#else +__ai poly64x1_t vmov_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmovq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai float64x2_t vmovq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t 
vmov_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#else +__ai float64x1_t vmov_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_112) { + uint16x8_t __ret_112; + uint8x8_t __a1_112 = vget_high_u8(__p0_112); + __ret_112 = (uint16x8_t)(vshll_n_u8(__a1_112, 0)); + return __ret_112; +} +#else +__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_113) { + uint8x16_t __rev0_113; __rev0_113 = __builtin_shufflevector(__p0_113, __p0_113, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret_113; + uint8x8_t __a1_113 = __noswap_vget_high_u8(__rev0_113); + __ret_113 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_113, 0)); + __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_113; +} +__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_114) { + uint16x8_t __ret_114; + uint8x8_t __a1_114 = __noswap_vget_high_u8(__p0_114); + __ret_114 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_114, 0)); + return __ret_114; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_115) { + uint64x2_t __ret_115; + uint32x2_t __a1_115 = vget_high_u32(__p0_115); + __ret_115 = (uint64x2_t)(vshll_n_u32(__a1_115, 0)); + return __ret_115; +} +#else +__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_116) { + uint32x4_t __rev0_116; __rev0_116 = __builtin_shufflevector(__p0_116, __p0_116, 3, 2, 1, 0); + uint64x2_t __ret_116; + uint32x2_t __a1_116 = __noswap_vget_high_u32(__rev0_116); + __ret_116 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_116, 0)); + __ret_116 = __builtin_shufflevector(__ret_116, __ret_116, 1, 0); + return __ret_116; +} +__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_117) { + uint64x2_t __ret_117; + uint32x2_t __a1_117 = __noswap_vget_high_u32(__p0_117); + __ret_117 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_117, 0)); + return __ret_117; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_118) { + uint32x4_t __ret_118; + uint16x4_t __a1_118 = vget_high_u16(__p0_118); + __ret_118 = (uint32x4_t)(vshll_n_u16(__a1_118, 0)); + return __ret_118; +} +#else +__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_119) { + uint16x8_t __rev0_119; __rev0_119 = __builtin_shufflevector(__p0_119, __p0_119, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret_119; + uint16x4_t __a1_119 = __noswap_vget_high_u16(__rev0_119); + __ret_119 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_119, 0)); + __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 3, 2, 1, 0); + return __ret_119; +} +__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_120) { + uint32x4_t __ret_120; + uint16x4_t __a1_120 = __noswap_vget_high_u16(__p0_120); + __ret_120 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_120, 0)); + return __ret_120; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovl_high_s8(int8x16_t __p0_121) { + int16x8_t __ret_121; + int8x8_t __a1_121 = vget_high_s8(__p0_121); + __ret_121 = (int16x8_t)(vshll_n_s8(__a1_121, 0)); + return __ret_121; +} +#else +__ai int16x8_t vmovl_high_s8(int8x16_t __p0_122) { + int8x16_t __rev0_122; __rev0_122 = __builtin_shufflevector(__p0_122, __p0_122, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret_122; + int8x8_t __a1_122 = __noswap_vget_high_s8(__rev0_122); + __ret_122 = (int16x8_t)(__noswap_vshll_n_s8(__a1_122, 0)); + __ret_122 = __builtin_shufflevector(__ret_122, __ret_122, 7, 6, 5, 4, 3, 2, 1, 0); + 
return __ret_122; +} +__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_123) { + int16x8_t __ret_123; + int8x8_t __a1_123 = __noswap_vget_high_s8(__p0_123); + __ret_123 = (int16x8_t)(__noswap_vshll_n_s8(__a1_123, 0)); + return __ret_123; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmovl_high_s32(int32x4_t __p0_124) { + int64x2_t __ret_124; + int32x2_t __a1_124 = vget_high_s32(__p0_124); + __ret_124 = (int64x2_t)(vshll_n_s32(__a1_124, 0)); + return __ret_124; +} +#else +__ai int64x2_t vmovl_high_s32(int32x4_t __p0_125) { + int32x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__p0_125, __p0_125, 3, 2, 1, 0); + int64x2_t __ret_125; + int32x2_t __a1_125 = __noswap_vget_high_s32(__rev0_125); + __ret_125 = (int64x2_t)(__noswap_vshll_n_s32(__a1_125, 0)); + __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 1, 0); + return __ret_125; +} +__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_126) { + int64x2_t __ret_126; + int32x2_t __a1_126 = __noswap_vget_high_s32(__p0_126); + __ret_126 = (int64x2_t)(__noswap_vshll_n_s32(__a1_126, 0)); + return __ret_126; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovl_high_s16(int16x8_t __p0_127) { + int32x4_t __ret_127; + int16x4_t __a1_127 = vget_high_s16(__p0_127); + __ret_127 = (int32x4_t)(vshll_n_s16(__a1_127, 0)); + return __ret_127; +} +#else +__ai int32x4_t vmovl_high_s16(int16x8_t __p0_128) { + int16x8_t __rev0_128; __rev0_128 = __builtin_shufflevector(__p0_128, __p0_128, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret_128; + int16x4_t __a1_128 = __noswap_vget_high_s16(__rev0_128); + __ret_128 = (int32x4_t)(__noswap_vshll_n_s16(__a1_128, 0)); + __ret_128 = __builtin_shufflevector(__ret_128, __ret_128, 3, 2, 1, 0); + return __ret_128; +} +__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_129) { + int32x4_t __ret_129; + int16x4_t __a1_129 = __noswap_vget_high_s16(__p0_129); + __ret_129 = (int32x4_t)(__noswap_vshll_n_s16(__a1_129, 0)); + return __ret_129; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vmovn_u32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vmovn_u64(__p1)); + return __ret; +} +#else +__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vmovn_u16(__p1)); + return __ret; +} +#else +__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vmovn_s32(__p1)); + return __ret; +} +#else +__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vmovn_s64(__p1)); + return __ret; +} +#else +__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vmovn_s16(__p1)); + return __ret; +} +#else +__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuld_lane_f64(__p0_130, __p1_130, __p2_130) __extension__ ({ \ + float64_t __s0_130 = __p0_130; \ + float64x1_t __s1_130 = __p1_130; \ + float64_t __ret_130; \ + __ret_130 = __s0_130 * vget_lane_f64(__s1_130, __p2_130); \ + __ret_130; \ +}) +#else +#define vmuld_lane_f64(__p0_131, __p1_131, __p2_131) __extension__ ({ \ + float64_t __s0_131 = __p0_131; \ + float64x1_t __s1_131 = __p1_131; \ + float64_t __ret_131; \ + __ret_131 = __s0_131 * __noswap_vget_lane_f64(__s1_131, __p2_131); \ + __ret_131; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuls_lane_f32(__p0_132, __p1_132, __p2_132) __extension__ ({ \ + 
float32_t __s0_132 = __p0_132; \ + float32x2_t __s1_132 = __p1_132; \ + float32_t __ret_132; \ + __ret_132 = __s0_132 * vget_lane_f32(__s1_132, __p2_132); \ + __ret_132; \ +}) +#else +#define vmuls_lane_f32(__p0_133, __p1_133, __p2_133) __extension__ ({ \ + float32_t __s0_133 = __p0_133; \ + float32x2_t __s1_133 = __p1_133; \ + float32x2_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 1, 0); \ + float32_t __ret_133; \ + __ret_133 = __s0_133 * __noswap_vget_lane_f32(__rev1_133, __p2_133); \ + __ret_133; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#else +#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuld_laneq_f64(__p0_134, __p1_134, __p2_134) __extension__ ({ \ + float64_t __s0_134 = __p0_134; \ + float64x2_t __s1_134 = __p1_134; \ + float64_t __ret_134; \ + __ret_134 = __s0_134 * vgetq_lane_f64(__s1_134, __p2_134); \ + __ret_134; \ +}) +#else +#define vmuld_laneq_f64(__p0_135, __p1_135, __p2_135) __extension__ ({ \ + float64_t __s0_135 = __p0_135; \ + float64x2_t __s1_135 = __p1_135; \ + float64x2_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \ + float64_t __ret_135; \ + __ret_135 = __s0_135 * __noswap_vgetq_lane_f64(__rev1_135, __p2_135); \ + __ret_135; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuls_laneq_f32(__p0_136, __p1_136, __p2_136) __extension__ ({ \ + float32_t __s0_136 = __p0_136; \ + float32x4_t __s1_136 = __p1_136; \ + float32_t __ret_136; \ + __ret_136 = __s0_136 * vgetq_lane_f32(__s1_136, __p2_136); \ + __ret_136; \ +}) +#else +#define vmuls_laneq_f32(__p0_137, __p1_137, __p2_137) __extension__ ({ \ + float32_t __s0_137 = __p0_137; \ + float32x4_t __s1_137 = __p1_137; \ + float32x4_t __rev1_137; __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \ + float32_t __ret_137; \ + __ret_137 = __s0_137 * __noswap_vgetq_lane_f32(__rev1_137, __p2_137); \ + __ret_137; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \ + __ret; \ +}) +#else +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x1_t __ret; \ + __ret = 
(float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x2_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, 
__p2); \ + __ret; \ +}) +#else +#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ + __ret; \ +}) +#else +#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1); + return __ret; +} +#else +__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __ret; + __ret = __p0 * (float64x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = __rev0 * (float64x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} +#else +__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} +__ai poly128_t __noswap_vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly16x8_t __ret; + __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); + return __ret; +} +#else +__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} +#else +__ai uint16x8_t 
vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); + return __ret; +} +#else +__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 
1, 0); + int32x4_t __ret; + __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); + return __ret; +} +#else +__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly128_t __ret; + __ret = __noswap_vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint64x2_t __ret; \ + __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_high_lane_s16(__p0, __p1, __p2) 
__extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = 
__noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_n_u32(vget_high_u32(__p0), __p1); + return __ret; +} +#else +__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_n_u16(vget_high_u16(__p0), __p1); + return __ret; +} +#else +__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vmull_n_s32(vget_high_s32(__p0), __p1); + return __ret; +} +#else +__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vmull_n_s16(vget_high_s16(__p0), __p1); + return __ret; +} +#else +__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint64x2_t __ret; \ + __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint32x4_t __ret; \ + __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 
5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; 
+ __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#else +__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); + return __ret; +} +#else +__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); + return __ret; +} +__ai float64_t __noswap_vmulxd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); + return __ret; +} +#else +__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); + return __ret; +} +__ai float32_t __noswap_vmulxs_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxd_lane_f64(__p0_138, __p1_138, __p2_138) __extension__ ({ \ + float64_t __s0_138 = __p0_138; \ + float64x1_t __s1_138 = __p1_138; \ + float64_t __ret_138; \ + __ret_138 = vmulxd_f64(__s0_138, vget_lane_f64(__s1_138, __p2_138)); \ + __ret_138; \ +}) +#else +#define vmulxd_lane_f64(__p0_139, __p1_139, __p2_139) __extension__ ({ \ + float64_t __s0_139 = __p0_139; \ + float64x1_t __s1_139 = __p1_139; \ + float64_t __ret_139; \ + __ret_139 = __noswap_vmulxd_f64(__s0_139, __noswap_vget_lane_f64(__s1_139, __p2_139)); \ + __ret_139; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxs_lane_f32(__p0_140, __p1_140, __p2_140) __extension__ ({ \ + float32_t __s0_140 = __p0_140; \ + float32x2_t __s1_140 = __p1_140; \ + float32_t __ret_140; \ + __ret_140 = vmulxs_f32(__s0_140, vget_lane_f32(__s1_140, __p2_140)); \ + __ret_140; \ +}) +#else +#define vmulxs_lane_f32(__p0_141, __p1_141, __p2_141) __extension__ ({ \ + float32_t __s0_141 = __p0_141; \ + float32x2_t __s1_141 = __p1_141; \ + float32x2_t __rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 1, 0); \ + float32_t __ret_141; \ + __ret_141 = __noswap_vmulxs_f32(__s0_141, __noswap_vget_lane_f32(__rev1_141, __p2_141)); \ + __ret_141; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ 
\ + float64x2_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __ret; \ + __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __ret; \ + __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __ret; \ + __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxd_laneq_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \ + float64_t __s0_142 = __p0_142; \ + float64x2_t __s1_142 = __p1_142; \ + float64_t __ret_142; \ + __ret_142 = vmulxd_f64(__s0_142, vgetq_lane_f64(__s1_142, __p2_142)); \ + __ret_142; \ +}) +#else +#define vmulxd_laneq_f64(__p0_143, __p1_143, __p2_143) __extension__ ({ \ + float64_t __s0_143 = __p0_143; \ + float64x2_t __s1_143 = __p1_143; \ + float64x2_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 1, 0); \ + float64_t __ret_143; \ + __ret_143 = __noswap_vmulxd_f64(__s0_143, __noswap_vgetq_lane_f64(__rev1_143, __p2_143)); \ + __ret_143; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxs_laneq_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \ + float32_t __s0_144 = __p0_144; \ + float32x4_t __s1_144 = __p1_144; \ + float32_t __ret_144; \ + __ret_144 = vmulxs_f32(__s0_144, vgetq_lane_f32(__s1_144, __p2_144)); \ + __ret_144; \ +}) +#else +#define vmulxs_laneq_f32(__p0_145, __p1_145, __p2_145) __extension__ ({ \ + float32_t __s0_145 = __p0_145; \ + float32x4_t __s1_145 = __p1_145; \ + float32x4_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \ + float32_t __ret_145; \ + __ret_145 = __noswap_vmulxs_f32(__s0_145, __noswap_vgetq_lane_f32(__rev1_145, __p2_145)); \ + __ret_145; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ 
+ float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __ret; \ + __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __ret; \ + __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __ret; \ + __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __ret; \ + __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vnegq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float64x2_t vnegq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int64x2_t vnegq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vneg_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai float64x1_t vneg_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = -__p0; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai int64x1_t vneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = -__p0; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) 
__builtin_neon_vnegd_s64(__p0); + return __ret; +} +#else +__ai int64_t vnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vpaddd_u64(uint64x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0); + return __ret; +} +#else +__ai uint64_t vpaddd_u64(uint64x2_t __p0) { 
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpaddd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vpaddd_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vpaddd_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0); + return __ret; +} +#else +__ai int64_t vpaddd_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64_t __ret; + __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpadds_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vpadds_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpmaxqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vpmaxqd_f64(float64x2_t __p0) { + 
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmaxs_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vpmaxs_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmaxnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vpmaxnms_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) 
__builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); 
+ return __ret; +} +#else +__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpminqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vpminqd_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpmins_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vpmins_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + 
float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vpminnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0); + return __ret; +} +#else +__ai float64_t vpminnmqd_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vpminnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0); + return __ret; +} +#else +__ai float32_t vpminnms_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vqabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vqabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vqabsb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); + return __ret; +} +#else +__ai int8_t vqabsb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqabss_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); + return __ret; +} +#else +__ai int32_t vqabss_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); + return __ret; +} +#else +__ai int64_t vqabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); + return __ret; 
+} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqabsh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); + return __ret; +} +#else +__ai int16_t vqabsh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); + return __ret; +} +#else +__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); + return __ret; +} +#else +__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); + return __ret; +} +#else +__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); + return __ret; +} +#else +__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); + return __ret; +} +__ai int32_t __noswap_vqadds_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); + return __ret; +} +#else +__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); + return __ret; +} +__ai int16_t __noswap_vqaddh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) 
__builtin_neon_vqdmlals_s32(__p0, __p1, __p2); + return __ret; +} +#else +__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); + return __ret; +} +#else +__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + 
int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t 
__rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); + return __ret; +} +#else +__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); + return __ret; +} +#else +__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = 
__builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, 
(int8x16_t)__rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); + return __ret; +} +#else +__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); + return __ret; +} +__ai int32_t __noswap_vqdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); + return __ret; +} +#else +__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); + return __ret; +} +__ai int16_t __noswap_vqdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) 
__builtin_neon_vqdmulhh_s16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_lane_s32(__p0_146, __p1_146, __p2_146) __extension__ ({ \ + int32_t __s0_146 = __p0_146; \ + int32x2_t __s1_146 = __p1_146; \ + int32_t __ret_146; \ + __ret_146 = vqdmulhs_s32(__s0_146, vget_lane_s32(__s1_146, __p2_146)); \ + __ret_146; \ +}) +#else +#define vqdmulhs_lane_s32(__p0_147, __p1_147, __p2_147) __extension__ ({ \ + int32_t __s0_147 = __p0_147; \ + int32x2_t __s1_147 = __p1_147; \ + int32x2_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \ + int32_t __ret_147; \ + __ret_147 = __noswap_vqdmulhs_s32(__s0_147, __noswap_vget_lane_s32(__rev1_147, __p2_147)); \ + __ret_147; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_lane_s16(__p0_148, __p1_148, __p2_148) __extension__ ({ \ + int16_t __s0_148 = __p0_148; \ + int16x4_t __s1_148 = __p1_148; \ + int16_t __ret_148; \ + __ret_148 = vqdmulhh_s16(__s0_148, vget_lane_s16(__s1_148, __p2_148)); \ + __ret_148; \ +}) +#else +#define vqdmulhh_lane_s16(__p0_149, __p1_149, __p2_149) __extension__ ({ \ + int16_t __s0_149 = __p0_149; \ + int16x4_t __s1_149 = __p1_149; \ + int16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \ + int16_t __ret_149; \ + __ret_149 = __noswap_vqdmulhh_s16(__s0_149, __noswap_vget_lane_s16(__rev1_149, __p2_149)); \ + __ret_149; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_laneq_s32(__p0_150, __p1_150, __p2_150) __extension__ ({ \ + int32_t __s0_150 = __p0_150; \ + int32x4_t __s1_150 = __p1_150; \ + int32_t __ret_150; \ + __ret_150 = vqdmulhs_s32(__s0_150, vgetq_lane_s32(__s1_150, __p2_150)); \ + __ret_150; \ +}) +#else +#define vqdmulhs_laneq_s32(__p0_151, __p1_151, __p2_151) __extension__ ({ \ + int32_t __s0_151 = __p0_151; \ + int32x4_t __s1_151 = __p1_151; \ + int32x4_t __rev1_151; __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \ + int32_t __ret_151; \ + __ret_151 = __noswap_vqdmulhs_s32(__s0_151, __noswap_vgetq_lane_s32(__rev1_151, __p2_151)); \ + __ret_151; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_laneq_s16(__p0_152, __p1_152, __p2_152) __extension__ ({ \ + int16_t __s0_152 = __p0_152; \ + int16x8_t __s1_152 = __p1_152; \ + int16_t __ret_152; \ + __ret_152 = vqdmulhh_s16(__s0_152, vgetq_lane_s16(__s1_152, __p2_152)); \ + __ret_152; \ +}) +#else +#define vqdmulhh_laneq_s16(__p0_153, __p1_153, __p2_153) __extension__ ({ \ + int16_t __s0_153 = __p0_153; \ + int16x8_t __s1_153 = __p1_153; \ + int16x8_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret_153; \ + __ret_153 = __noswap_vqdmulhh_s16(__s0_153, __noswap_vgetq_lane_s16(__rev1_153, __p2_153)); \ + __ret_153; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 
0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); + return __ret; +} +#else +__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); + return __ret; +} +__ai int64_t __noswap_vqdmulls_s32(int32_t __p0, int32_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); + return __ret; +} +#else +__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); + return __ret; +} +__ai int32_t __noswap_vqdmullh_s16(int16_t __p0, int16_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; 
+ __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + 
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); + return __ret; +} +#else +__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); + return __ret; +} +#else +__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulls_lane_s32(__p0_154, __p1_154, __p2_154) __extension__ ({ \ + int32_t __s0_154 = __p0_154; \ + int32x2_t __s1_154 = __p1_154; \ + int64_t __ret_154; \ + __ret_154 = vqdmulls_s32(__s0_154, vget_lane_s32(__s1_154, __p2_154)); \ + __ret_154; \ +}) +#else +#define vqdmulls_lane_s32(__p0_155, __p1_155, __p2_155) __extension__ ({ \ + int32_t __s0_155 = __p0_155; \ + int32x2_t __s1_155 = __p1_155; \ + int32x2_t __rev1_155; __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 1, 0); \ + int64_t __ret_155; \ + __ret_155 = __noswap_vqdmulls_s32(__s0_155, __noswap_vget_lane_s32(__rev1_155, __p2_155)); \ + __ret_155; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmullh_lane_s16(__p0_156, __p1_156, __p2_156) __extension__ ({ \ + int16_t __s0_156 = __p0_156; \ + int16x4_t __s1_156 = __p1_156; \ + int32_t __ret_156; \ + __ret_156 = vqdmullh_s16(__s0_156, vget_lane_s16(__s1_156, __p2_156)); \ + __ret_156; \ +}) +#else +#define vqdmullh_lane_s16(__p0_157, __p1_157, __p2_157) __extension__ ({ \ + int16_t __s0_157 = __p0_157; \ + int16x4_t __s1_157 = __p1_157; \ + int16x4_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 3, 2, 1, 0); \ + int32_t __ret_157; \ + __ret_157 = __noswap_vqdmullh_s16(__s0_157, __noswap_vget_lane_s16(__rev1_157, __p2_157)); \ + __ret_157; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulls_laneq_s32(__p0_158, __p1_158, __p2_158) __extension__ ({ \ + int32_t __s0_158 = __p0_158; \ + int32x4_t __s1_158 = __p1_158; \ + int64_t __ret_158; \ + __ret_158 = vqdmulls_s32(__s0_158, vgetq_lane_s32(__s1_158, __p2_158)); \ + __ret_158; \ +}) 
+#else +#define vqdmulls_laneq_s32(__p0_159, __p1_159, __p2_159) __extension__ ({ \ + int32_t __s0_159 = __p0_159; \ + int32x4_t __s1_159 = __p1_159; \ + int32x4_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 3, 2, 1, 0); \ + int64_t __ret_159; \ + __ret_159 = __noswap_vqdmulls_s32(__s0_159, __noswap_vgetq_lane_s32(__rev1_159, __p2_159)); \ + __ret_159; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmullh_laneq_s16(__p0_160, __p1_160, __p2_160) __extension__ ({ \ + int16_t __s0_160 = __p0_160; \ + int16x8_t __s1_160 = __p1_160; \ + int32_t __ret_160; \ + __ret_160 = vqdmullh_s16(__s0_160, vgetq_lane_s16(__s1_160, __p2_160)); \ + __ret_160; \ +}) +#else +#define vqdmullh_laneq_s16(__p0_161, __p1_161, __p2_161) __extension__ ({ \ + int16_t __s0_161 = __p0_161; \ + int16x8_t __s1_161 = __p1_161; \ + int16x8_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32_t __ret_161; \ + __ret_161 = __noswap_vqdmullh_s16(__s0_161, __noswap_vgetq_lane_s16(__rev1_161, __p2_161)); \ + __ret_161; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int64x2_t __ret; \ + __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int64x2_t __ret; \ + __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqmovns_s32(int32_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); + return __ret; +} +#else +__ai int16_t vqmovns_s32(int32_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqmovnd_s64(int64_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); + return __ret; +} +#else +__ai int32_t vqmovnd_s64(int64_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vqmovnh_s16(int16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); + return __ret; +} +#else +__ai int8_t vqmovnh_s16(int16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai uint16_t vqmovns_u32(uint32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); + return __ret; +} +#else +__ai uint16_t vqmovns_u32(uint32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vqmovnd_u64(uint64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); + return __ret; +} +#else +__ai uint32_t vqmovnd_u64(uint64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vqmovnh_u16(uint16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); + return __ret; +} +#else +__ai uint8_t vqmovnh_u16(uint16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); + return __ret; +} +#else +__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); + return __ret; +} +#else +__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); + return __ret; +} +#else +__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); + return __ret; 
+} +#else +__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); + return __ret; +} +#else +__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqmovuns_s32(int32_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0); + return __ret; +} +#else +__ai int16_t vqmovuns_s32(int32_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqmovund_s64(int64_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0); + return __ret; +} +#else +__ai int32_t vqmovund_s64(int64_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vqmovunh_s16(int16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0); + return __ret; +} +#else +__ai int8_t vqmovunh_s16(int16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); + return __ret; +} +#else +__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); + return __ret; +} +#else +__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); + return __ret; +} +#else +__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 
5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vqneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai int64x1_t vqneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vqnegb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); + return __ret; +} +#else +__ai int8_t vqnegb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqnegs_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); + return __ret; +} +#else +__ai int32_t vqnegs_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); + return __ret; +} +#else +__ai int64_t vqnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqnegh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); + return __ret; +} +#else +__ai int16_t vqnegh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); + return __ret; +} +#else +__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); + return __ret; +} +__ai int32_t __noswap_vqrdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); + return __ret; +} +#else +__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); + return __ret; +} +__ai int16_t __noswap_vqrdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhs_lane_s32(__p0_162, __p1_162, __p2_162) __extension__ ({ \ + int32_t __s0_162 = __p0_162; \ + int32x2_t __s1_162 = 
__p1_162; \ + int32_t __ret_162; \ + __ret_162 = vqrdmulhs_s32(__s0_162, vget_lane_s32(__s1_162, __p2_162)); \ + __ret_162; \ +}) +#else +#define vqrdmulhs_lane_s32(__p0_163, __p1_163, __p2_163) __extension__ ({ \ + int32_t __s0_163 = __p0_163; \ + int32x2_t __s1_163 = __p1_163; \ + int32x2_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 1, 0); \ + int32_t __ret_163; \ + __ret_163 = __noswap_vqrdmulhs_s32(__s0_163, __noswap_vget_lane_s32(__rev1_163, __p2_163)); \ + __ret_163; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhh_lane_s16(__p0_164, __p1_164, __p2_164) __extension__ ({ \ + int16_t __s0_164 = __p0_164; \ + int16x4_t __s1_164 = __p1_164; \ + int16_t __ret_164; \ + __ret_164 = vqrdmulhh_s16(__s0_164, vget_lane_s16(__s1_164, __p2_164)); \ + __ret_164; \ +}) +#else +#define vqrdmulhh_lane_s16(__p0_165, __p1_165, __p2_165) __extension__ ({ \ + int16_t __s0_165 = __p0_165; \ + int16x4_t __s1_165 = __p1_165; \ + int16x4_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 3, 2, 1, 0); \ + int16_t __ret_165; \ + __ret_165 = __noswap_vqrdmulhh_s16(__s0_165, __noswap_vget_lane_s16(__rev1_165, __p2_165)); \ + __ret_165; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhs_laneq_s32(__p0_166, __p1_166, __p2_166) __extension__ ({ \ + int32_t __s0_166 = __p0_166; \ + int32x4_t __s1_166 = __p1_166; \ + int32_t __ret_166; \ + __ret_166 = vqrdmulhs_s32(__s0_166, vgetq_lane_s32(__s1_166, __p2_166)); \ + __ret_166; \ +}) +#else +#define vqrdmulhs_laneq_s32(__p0_167, __p1_167, __p2_167) __extension__ ({ \ + int32_t __s0_167 = __p0_167; \ + int32x4_t __s1_167 = __p1_167; \ + int32x4_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 3, 2, 1, 0); \ + int32_t __ret_167; \ + __ret_167 = __noswap_vqrdmulhs_s32(__s0_167, __noswap_vgetq_lane_s32(__rev1_167, __p2_167)); \ + __ret_167; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhh_laneq_s16(__p0_168, __p1_168, __p2_168) __extension__ ({ \ + int16_t __s0_168 = __p0_168; \ + int16x8_t __s1_168 = __p1_168; \ + int16_t __ret_168; \ + __ret_168 = vqrdmulhh_s16(__s0_168, vgetq_lane_s16(__s1_168, __p2_168)); \ + __ret_168; \ +}) +#else +#define vqrdmulhh_laneq_s16(__p0_169, __p1_169, __p2_169) __extension__ ({ \ + int16_t __s0_169 = __p0_169; \ + int16x8_t __s1_169 = __p1_169; \ + int16x8_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret_169; \ + __ret_169 = __noswap_vqrdmulhh_s16(__s0_169, __noswap_vgetq_lane_s16(__rev1_169, __p2_169)); \ + __ret_169; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __ret; \ + __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __ret; \ + __ret = 
vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret; \ + __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __ret; \ + __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int32x2_t __ret; \ + __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __ret; \ + __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ + __ret; \ +}) +#else +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __ret; \ + __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); + return __ret; +} +#else +__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); + return __ret; +} +#else +__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = 
(uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); + return __ret; +} +#else +__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); + return __ret; +} +#else +__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); + return __ret; +} +#else +__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u32(__p0_170, __p1_170, __p2_170) __extension__ ({ \ + uint16x4_t __s0_170 = __p0_170; \ + uint32x4_t __s1_170 = __p1_170; \ + uint16x8_t __ret_170; \ + __ret_170 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_170), (uint16x4_t)(vqrshrn_n_u32(__s1_170, __p2_170)))); \ + __ret_170; \ +}) +#else +#define vqrshrn_high_n_u32(__p0_171, __p1_171, __p2_171) __extension__ ({ \ + uint16x4_t __s0_171 = __p0_171; \ + uint32x4_t __s1_171 = __p1_171; \ + uint16x4_t __rev0_171; __rev0_171 = __builtin_shufflevector(__s0_171, __s0_171, 3, 2, 1, 0); \ + uint32x4_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \ + uint16x8_t __ret_171; \ + __ret_171 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_171), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_171, __p2_171)))); \ + __ret_171 = __builtin_shufflevector(__ret_171, __ret_171, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_171; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u64(__p0_172, __p1_172, __p2_172) __extension__ ({ \ + uint32x2_t __s0_172 = __p0_172; \ + uint64x2_t __s1_172 = __p1_172; \ + uint32x4_t __ret_172; \ + __ret_172 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_172), (uint32x2_t)(vqrshrn_n_u64(__s1_172, __p2_172)))); \ + __ret_172; \ +}) +#else +#define vqrshrn_high_n_u64(__p0_173, __p1_173, __p2_173) __extension__ ({ \ + uint32x2_t __s0_173 = __p0_173; \ + uint64x2_t __s1_173 = __p1_173; \ + uint32x2_t __rev0_173; __rev0_173 = __builtin_shufflevector(__s0_173, __s0_173, 1, 0); \ + uint64x2_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 1, 0); \ + uint32x4_t __ret_173; \ + __ret_173 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_173), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_173, __p2_173)))); \ + __ret_173 = __builtin_shufflevector(__ret_173, __ret_173, 3, 2, 1, 0); \ + __ret_173; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u16(__p0_174, __p1_174, __p2_174) __extension__ ({ \ + uint8x8_t __s0_174 = __p0_174; \ + uint16x8_t __s1_174 = __p1_174; \ + uint8x16_t 
__ret_174; \ + __ret_174 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_174), (uint8x8_t)(vqrshrn_n_u16(__s1_174, __p2_174)))); \ + __ret_174; \ +}) +#else +#define vqrshrn_high_n_u16(__p0_175, __p1_175, __p2_175) __extension__ ({ \ + uint8x8_t __s0_175 = __p0_175; \ + uint16x8_t __s1_175 = __p1_175; \ + uint8x8_t __rev0_175; __rev0_175 = __builtin_shufflevector(__s0_175, __s0_175, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_175; \ + __ret_175 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_175), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_175, __p2_175)))); \ + __ret_175 = __builtin_shufflevector(__ret_175, __ret_175, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_175; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s32(__p0_176, __p1_176, __p2_176) __extension__ ({ \ + int16x4_t __s0_176 = __p0_176; \ + int32x4_t __s1_176 = __p1_176; \ + int16x8_t __ret_176; \ + __ret_176 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_176), (int16x4_t)(vqrshrn_n_s32(__s1_176, __p2_176)))); \ + __ret_176; \ +}) +#else +#define vqrshrn_high_n_s32(__p0_177, __p1_177, __p2_177) __extension__ ({ \ + int16x4_t __s0_177 = __p0_177; \ + int32x4_t __s1_177 = __p1_177; \ + int16x4_t __rev0_177; __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 3, 2, 1, 0); \ + int32x4_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 3, 2, 1, 0); \ + int16x8_t __ret_177; \ + __ret_177 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_177), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_177, __p2_177)))); \ + __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_177; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s64(__p0_178, __p1_178, __p2_178) __extension__ ({ \ + int32x2_t __s0_178 = __p0_178; \ + int64x2_t __s1_178 = __p1_178; \ + int32x4_t __ret_178; \ + __ret_178 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_178), (int32x2_t)(vqrshrn_n_s64(__s1_178, __p2_178)))); \ + __ret_178; \ +}) +#else +#define vqrshrn_high_n_s64(__p0_179, __p1_179, __p2_179) __extension__ ({ \ + int32x2_t __s0_179 = __p0_179; \ + int64x2_t __s1_179 = __p1_179; \ + int32x2_t __rev0_179; __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 1, 0); \ + int64x2_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 1, 0); \ + int32x4_t __ret_179; \ + __ret_179 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_179), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_179, __p2_179)))); \ + __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \ + __ret_179; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s16(__p0_180, __p1_180, __p2_180) __extension__ ({ \ + int8x8_t __s0_180 = __p0_180; \ + int16x8_t __s1_180 = __p1_180; \ + int8x16_t __ret_180; \ + __ret_180 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_180), (int8x8_t)(vqrshrn_n_s16(__s1_180, __p2_180)))); \ + __ret_180; \ +}) +#else +#define vqrshrn_high_n_s16(__p0_181, __p1_181, __p2_181) __extension__ ({ \ + int8x8_t __s0_181 = __p0_181; \ + int16x8_t __s1_181 = __p1_181; \ + int8x8_t __rev0_181; __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_181; \ + __ret_181 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_181), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_181, 
__p2_181)))); \ + __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_181; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s32(__p0_182, __p1_182, __p2_182) __extension__ ({ \ + int16x4_t __s0_182 = __p0_182; \ + int32x4_t __s1_182 = __p1_182; \ + int16x8_t __ret_182; \ + __ret_182 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_182), (int16x4_t)(vqrshrun_n_s32(__s1_182, __p2_182)))); \ + __ret_182; \ +}) +#else +#define vqrshrun_high_n_s32(__p0_183, __p1_183, __p2_183) __extension__ ({ \ + int16x4_t __s0_183 = __p0_183; \ + int32x4_t __s1_183 = __p1_183; \ + int16x4_t __rev0_183; __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 3, 2, 1, 0); \ + int32x4_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \ + int16x8_t __ret_183; \ + __ret_183 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_183), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_183, __p2_183)))); \ + __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_183; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vqrshrun_high_n_s64(__p0_184, __p1_184, __p2_184) __extension__ ({ \ + int32x2_t __s0_184 = __p0_184; \ + int64x2_t __s1_184 = __p1_184; \ + int32x4_t __ret_184; \ + __ret_184 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_184), (int32x2_t)(vqrshrun_n_s64(__s1_184, __p2_184)))); \ + __ret_184; \ +}) +#else +#define vqrshrun_high_n_s64(__p0_185, __p1_185, __p2_185) __extension__ ({ \ + int32x2_t __s0_185 = __p0_185; \ + int64x2_t __s1_185 = __p1_185; \ + int32x2_t __rev0_185; __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 1, 0); \ + int64x2_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 1, 0); \ + int32x4_t __ret_185; \ + __ret_185 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_185), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_185, __p2_185)))); \ + __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \ + __ret_185; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s16(__p0_186, __p1_186, __p2_186) __extension__ ({ \ + int8x8_t __s0_186 = __p0_186; \ + int16x8_t __s1_186 = __p1_186; \ + int8x16_t __ret_186; \ + __ret_186 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_186), (int8x8_t)(vqrshrun_n_s16(__s1_186, __p2_186)))); \ + __ret_186; \ +}) +#else +#define vqrshrun_high_n_s16(__p0_187, __p1_187, __p2_187) __extension__ ({ \ + int8x8_t __s0_187 = __p0_187; \ + int16x8_t __s1_187 = __p1_187; \ + int8x8_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_187; \ + __ret_187 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_187), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_187, __p2_187)))); \ + __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_187; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); + return __ret; +} +#else +__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t 
__p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); + return __ret; +} +#else +__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); + return __ret; +} +#else +__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); + return __ret; +} +#else +__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); + return __ret; +} +#else +__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ + uint8_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ + uint8_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vqshrn_high_n_u32(__p0_188, __p1_188, __p2_188) __extension__ ({ \ + uint16x4_t __s0_188 = __p0_188; \ + uint32x4_t __s1_188 = __p1_188; \ + uint16x8_t __ret_188; \ + __ret_188 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_188), (uint16x4_t)(vqshrn_n_u32(__s1_188, __p2_188)))); \ + __ret_188; \ +}) +#else +#define vqshrn_high_n_u32(__p0_189, __p1_189, __p2_189) __extension__ ({ \ + uint16x4_t __s0_189 = __p0_189; \ + uint32x4_t __s1_189 = __p1_189; \ + uint16x4_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \ + uint32x4_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \ + uint16x8_t __ret_189; \ + __ret_189 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_189), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_189, __p2_189)))); \ + __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_189; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u64(__p0_190, __p1_190, __p2_190) __extension__ ({ \ + uint32x2_t __s0_190 = __p0_190; \ + uint64x2_t __s1_190 = __p1_190; \ + uint32x4_t __ret_190; \ + __ret_190 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_190), (uint32x2_t)(vqshrn_n_u64(__s1_190, __p2_190)))); \ + __ret_190; \ +}) +#else +#define vqshrn_high_n_u64(__p0_191, __p1_191, __p2_191) __extension__ ({ \ + uint32x2_t __s0_191 = __p0_191; \ + uint64x2_t __s1_191 = __p1_191; \ + uint32x2_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \ + uint64x2_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \ + uint32x4_t __ret_191; \ + __ret_191 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_191), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_191, __p2_191)))); \ + __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 3, 2, 1, 0); \ + __ret_191; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u16(__p0_192, __p1_192, __p2_192) __extension__ ({ \ + uint8x8_t __s0_192 = __p0_192; \ + uint16x8_t __s1_192 = __p1_192; \ + uint8x16_t __ret_192; \ + __ret_192 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_192), (uint8x8_t)(vqshrn_n_u16(__s1_192, __p2_192)))); \ + __ret_192; \ +}) +#else +#define vqshrn_high_n_u16(__p0_193, __p1_193, __p2_193) __extension__ ({ \ + uint8x8_t __s0_193 = __p0_193; \ + uint16x8_t __s1_193 = __p1_193; \ + uint8x8_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_193; \ + __ret_193 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_193), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_193, __p2_193)))); \ + __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_193; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s32(__p0_194, __p1_194, __p2_194) __extension__ ({ \ + int16x4_t __s0_194 = __p0_194; \ + int32x4_t __s1_194 = __p1_194; \ + int16x8_t __ret_194; \ + __ret_194 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_194), (int16x4_t)(vqshrn_n_s32(__s1_194, __p2_194)))); \ + __ret_194; \ +}) +#else +#define vqshrn_high_n_s32(__p0_195, __p1_195, __p2_195) __extension__ ({ \ + int16x4_t __s0_195 = __p0_195; \ + int32x4_t __s1_195 = __p1_195; \ + int16x4_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 3, 2, 1, 0); \ + int32x4_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 3, 2, 1, 0); \ + 
int16x8_t __ret_195; \ + __ret_195 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_195), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_195, __p2_195)))); \ + __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_195; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s64(__p0_196, __p1_196, __p2_196) __extension__ ({ \ + int32x2_t __s0_196 = __p0_196; \ + int64x2_t __s1_196 = __p1_196; \ + int32x4_t __ret_196; \ + __ret_196 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_196), (int32x2_t)(vqshrn_n_s64(__s1_196, __p2_196)))); \ + __ret_196; \ +}) +#else +#define vqshrn_high_n_s64(__p0_197, __p1_197, __p2_197) __extension__ ({ \ + int32x2_t __s0_197 = __p0_197; \ + int64x2_t __s1_197 = __p1_197; \ + int32x2_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 1, 0); \ + int64x2_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 1, 0); \ + int32x4_t __ret_197; \ + __ret_197 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_197), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_197, __p2_197)))); \ + __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \ + __ret_197; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s16(__p0_198, __p1_198, __p2_198) __extension__ ({ \ + int8x8_t __s0_198 = __p0_198; \ + int16x8_t __s1_198 = __p1_198; \ + int8x16_t __ret_198; \ + __ret_198 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_198), (int8x8_t)(vqshrn_n_s16(__s1_198, __p2_198)))); \ + __ret_198; \ +}) +#else +#define vqshrn_high_n_s16(__p0_199, __p1_199, __p2_199) __extension__ ({ \ + int8x8_t __s0_199 = __p0_199; \ + int16x8_t __s1_199 = __p1_199; \ + int8x8_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_199; \ + __ret_199 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_199), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_199, __p2_199)))); \ + __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_199; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __s0 = __p0; \ + uint16_t __ret; \ + __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint32_t __ret; \ + __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __s0 = __p0; \ + uint8_t __ret; \ + __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) 
__builtin_neon_vqshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s32(__p0_200, __p1_200, __p2_200) __extension__ ({ \ + int16x4_t __s0_200 = __p0_200; \ + int32x4_t __s1_200 = __p1_200; \ + int16x8_t __ret_200; \ + __ret_200 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_200), (int16x4_t)(vqshrun_n_s32(__s1_200, __p2_200)))); \ + __ret_200; \ +}) +#else +#define vqshrun_high_n_s32(__p0_201, __p1_201, __p2_201) __extension__ ({ \ + int16x4_t __s0_201 = __p0_201; \ + int32x4_t __s1_201 = __p1_201; \ + int16x4_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \ + int32x4_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \ + int16x8_t __ret_201; \ + __ret_201 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_201), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_201, __p2_201)))); \ + __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_201; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s64(__p0_202, __p1_202, __p2_202) __extension__ ({ \ + int32x2_t __s0_202 = __p0_202; \ + int64x2_t __s1_202 = __p1_202; \ + int32x4_t __ret_202; \ + __ret_202 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_202), (int32x2_t)(vqshrun_n_s64(__s1_202, __p2_202)))); \ + __ret_202; \ +}) +#else +#define vqshrun_high_n_s64(__p0_203, __p1_203, __p2_203) __extension__ ({ \ + int32x2_t __s0_203 = __p0_203; \ + int64x2_t __s1_203 = __p1_203; \ + int32x2_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \ + int64x2_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \ + int32x4_t __ret_203; \ + __ret_203 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_203), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_203, __p2_203)))); \ + __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 3, 2, 1, 0); \ + __ret_203; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s16(__p0_204, __p1_204, __p2_204) __extension__ ({ \ + int8x8_t __s0_204 = __p0_204; \ + int16x8_t __s1_204 = __p1_204; \ + int8x16_t __ret_204; \ + __ret_204 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_204), (int8x8_t)(vqshrun_n_s16(__s1_204, __p2_204)))); \ + __ret_204; \ +}) +#else +#define vqshrun_high_n_s16(__p0_205, __p1_205, __p2_205) __extension__ ({ \ + int8x8_t __s0_205 = __p0_205; \ + int16x8_t __s1_205 = __p1_205; \ + int8x8_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_205; __rev1_205 = 
__builtin_shufflevector(__s1_205, __s1_205, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_205; \ + __ret_205 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_205), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_205, __p2_205)))); \ + __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_205; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __s0 = __p0; \ + int16_t __ret; \ + __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int32_t __ret; \ + __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +#else +#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __s0 = __p0; \ + int8_t __ret; \ + __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); + return __ret; +} +#else +__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); + return __ret; +} +#else +__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); + return __ret; +} +#else +__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); + return __ret; +} +#else +__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { + int32_t 
__ret; + __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); + return __ret; +} +__ai int32_t __noswap_vqsubs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); + return __ret; +} +#else +__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); + return __ret; +} +__ai int16_t __noswap_vqsubh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} 
+#else +__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) { + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) { + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) { + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) { + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0); 
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) { + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) { + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
__rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 
(int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, 
__ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t 
__p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = 
__builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai int8x8_t vqtbx4_s8(int8x8_t __p0, 
int8x16x4_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t 
__p1, int32x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrecpe_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vrecpe_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vrecped_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); + return __ret; +} +#else +__ai float64_t vrecped_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vrecpes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); + return __ret; +} +#else +__ai float32_t vrecpes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + 
float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#else +__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); + return __ret; +} +#else +__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); + return __ret; +} +#else +__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vrecpxd_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); + return __ret; +} +#else +__ai float64_t vrecpxd_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vrecpxs_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); + return __ret; +} +#else +__ai float32_t vrecpxs_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#else +#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u32(__p0_206, 
__p1_206, __p2_206) __extension__ ({ \ + uint16x4_t __s0_206 = __p0_206; \ + uint32x4_t __s1_206 = __p1_206; \ + uint16x8_t __ret_206; \ + __ret_206 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_206), (uint16x4_t)(vrshrn_n_u32(__s1_206, __p2_206)))); \ + __ret_206; \ +}) +#else +#define vrshrn_high_n_u32(__p0_207, __p1_207, __p2_207) __extension__ ({ \ + uint16x4_t __s0_207 = __p0_207; \ + uint32x4_t __s1_207 = __p1_207; \ + uint16x4_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 3, 2, 1, 0); \ + uint32x4_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 3, 2, 1, 0); \ + uint16x8_t __ret_207; \ + __ret_207 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_207), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_207, __p2_207)))); \ + __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_207; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u64(__p0_208, __p1_208, __p2_208) __extension__ ({ \ + uint32x2_t __s0_208 = __p0_208; \ + uint64x2_t __s1_208 = __p1_208; \ + uint32x4_t __ret_208; \ + __ret_208 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_208), (uint32x2_t)(vrshrn_n_u64(__s1_208, __p2_208)))); \ + __ret_208; \ +}) +#else +#define vrshrn_high_n_u64(__p0_209, __p1_209, __p2_209) __extension__ ({ \ + uint32x2_t __s0_209 = __p0_209; \ + uint64x2_t __s1_209 = __p1_209; \ + uint32x2_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 1, 0); \ + uint64x2_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 1, 0); \ + uint32x4_t __ret_209; \ + __ret_209 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_209), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_209, __p2_209)))); \ + __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \ + __ret_209; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u16(__p0_210, __p1_210, __p2_210) __extension__ ({ \ + uint8x8_t __s0_210 = __p0_210; \ + uint16x8_t __s1_210 = __p1_210; \ + uint8x16_t __ret_210; \ + __ret_210 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_210), (uint8x8_t)(vrshrn_n_u16(__s1_210, __p2_210)))); \ + __ret_210; \ +}) +#else +#define vrshrn_high_n_u16(__p0_211, __p1_211, __p2_211) __extension__ ({ \ + uint8x8_t __s0_211 = __p0_211; \ + uint16x8_t __s1_211 = __p1_211; \ + uint8x8_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_211; \ + __ret_211 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_211), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_211, __p2_211)))); \ + __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_211; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s32(__p0_212, __p1_212, __p2_212) __extension__ ({ \ + int16x4_t __s0_212 = __p0_212; \ + int32x4_t __s1_212 = __p1_212; \ + int16x8_t __ret_212; \ + __ret_212 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_212), (int16x4_t)(vrshrn_n_s32(__s1_212, __p2_212)))); \ + __ret_212; \ +}) +#else +#define vrshrn_high_n_s32(__p0_213, __p1_213, __p2_213) __extension__ ({ \ + int16x4_t __s0_213 = __p0_213; \ + int32x4_t __s1_213 = __p1_213; \ + int16x4_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \ + int32x4_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \ + int16x8_t __ret_213; \ + 
__ret_213 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_213), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_213, __p2_213)))); \ + __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_213; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s64(__p0_214, __p1_214, __p2_214) __extension__ ({ \ + int32x2_t __s0_214 = __p0_214; \ + int64x2_t __s1_214 = __p1_214; \ + int32x4_t __ret_214; \ + __ret_214 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_214), (int32x2_t)(vrshrn_n_s64(__s1_214, __p2_214)))); \ + __ret_214; \ +}) +#else +#define vrshrn_high_n_s64(__p0_215, __p1_215, __p2_215) __extension__ ({ \ + int32x2_t __s0_215 = __p0_215; \ + int64x2_t __s1_215 = __p1_215; \ + int32x2_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 1, 0); \ + int64x2_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \ + int32x4_t __ret_215; \ + __ret_215 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_215), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_215, __p2_215)))); \ + __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \ + __ret_215; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s16(__p0_216, __p1_216, __p2_216) __extension__ ({ \ + int8x8_t __s0_216 = __p0_216; \ + int16x8_t __s1_216 = __p1_216; \ + int8x16_t __ret_216; \ + __ret_216 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_216), (int8x8_t)(vrshrn_n_s16(__s1_216, __p2_216)))); \ + __ret_216; \ +}) +#else +#define vrshrn_high_n_s16(__p0_217, __p1_217, __p2_217) __extension__ ({ \ + int8x8_t __s0_217 = __p0_217; \ + int16x8_t __s1_217 = __p1_217; \ + int8x8_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_217; \ + __ret_217 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_217), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_217, __p2_217)))); \ + __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_217; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrsqrte_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vrsqrte_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vrsqrted_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); + return __ret; +} +#else +__ai float64_t vrsqrted_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vrsqrtes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); + return __ret; +} +#else +__ai float32_t 
vrsqrtes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#else +__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); + return __ret; +} +#else +__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); + return __ret; +} +#else +__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#else +#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#else +#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2)); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, 
__p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#define __noswap_vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#define __noswap_vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, 
__p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#else +#define vshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u8(__p0_218, __p1_218) __extension__ ({ \ + uint8x16_t __s0_218 = __p0_218; \ + uint16x8_t __ret_218; \ + __ret_218 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_218), __p1_218)); \ + __ret_218; \ +}) +#else +#define vshll_high_n_u8(__p0_219, __p1_219) __extension__ ({ \ + uint8x16_t __s0_219 = __p0_219; \ + uint8x16_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __ret_219; \ + __ret_219 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_219), __p1_219)); \ + __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_219; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u32(__p0_220, __p1_220) __extension__ ({ \ + uint32x4_t __s0_220 = __p0_220; \ + uint64x2_t __ret_220; \ + __ret_220 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_220), __p1_220)); \ + __ret_220; \ +}) +#else +#define vshll_high_n_u32(__p0_221, __p1_221) __extension__ ({ \ + uint32x4_t __s0_221 = __p0_221; \ + uint32x4_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 3, 2, 1, 0); \ + uint64x2_t __ret_221; \ + __ret_221 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_221), __p1_221)); \ + __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 1, 0); \ + __ret_221; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u16(__p0_222, __p1_222) __extension__ ({ \ + uint16x8_t __s0_222 = __p0_222; \ + uint32x4_t __ret_222; \ + __ret_222 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_222), __p1_222)); \ + __ret_222; \ +}) +#else +#define vshll_high_n_u16(__p0_223, __p1_223) __extension__ ({ \ + uint16x8_t __s0_223 = __p0_223; \ + uint16x8_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint32x4_t __ret_223; \ + __ret_223 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_223), __p1_223)); \ + __ret_223 = __builtin_shufflevector(__ret_223, 
__ret_223, 3, 2, 1, 0); \ + __ret_223; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s8(__p0_224, __p1_224) __extension__ ({ \ + int8x16_t __s0_224 = __p0_224; \ + int16x8_t __ret_224; \ + __ret_224 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_224), __p1_224)); \ + __ret_224; \ +}) +#else +#define vshll_high_n_s8(__p0_225, __p1_225) __extension__ ({ \ + int8x16_t __s0_225 = __p0_225; \ + int8x16_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __ret_225; \ + __ret_225 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_225), __p1_225)); \ + __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_225; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s32(__p0_226, __p1_226) __extension__ ({ \ + int32x4_t __s0_226 = __p0_226; \ + int64x2_t __ret_226; \ + __ret_226 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_226), __p1_226)); \ + __ret_226; \ +}) +#else +#define vshll_high_n_s32(__p0_227, __p1_227) __extension__ ({ \ + int32x4_t __s0_227 = __p0_227; \ + int32x4_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 3, 2, 1, 0); \ + int64x2_t __ret_227; \ + __ret_227 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_227), __p1_227)); \ + __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \ + __ret_227; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s16(__p0_228, __p1_228) __extension__ ({ \ + int16x8_t __s0_228 = __p0_228; \ + int32x4_t __ret_228; \ + __ret_228 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_228), __p1_228)); \ + __ret_228; \ +}) +#else +#define vshll_high_n_s16(__p0_229, __p1_229) __extension__ ({ \ + int16x8_t __s0_229 = __p0_229; \ + int16x8_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 7, 6, 5, 4, 3, 2, 1, 0); \ + int32x4_t __ret_229; \ + __ret_229 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_229), __p1_229)); \ + __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \ + __ret_229; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#else +#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#else +#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u32(__p0_230, __p1_230, __p2_230) __extension__ ({ \ + uint16x4_t __s0_230 = __p0_230; \ + uint32x4_t __s1_230 = __p1_230; \ + uint16x8_t __ret_230; \ + __ret_230 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_230), (uint16x4_t)(vshrn_n_u32(__s1_230, __p2_230)))); \ + __ret_230; \ +}) +#else +#define vshrn_high_n_u32(__p0_231, __p1_231, __p2_231) __extension__ ({ \ + uint16x4_t __s0_231 = __p0_231; \ + uint32x4_t __s1_231 = __p1_231; \ + uint16x4_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 3, 2, 1, 0); \ + uint32x4_t __rev1_231; __rev1_231 = 
__builtin_shufflevector(__s1_231, __s1_231, 3, 2, 1, 0); \ + uint16x8_t __ret_231; \ + __ret_231 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_231), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_231, __p2_231)))); \ + __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_231; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u64(__p0_232, __p1_232, __p2_232) __extension__ ({ \ + uint32x2_t __s0_232 = __p0_232; \ + uint64x2_t __s1_232 = __p1_232; \ + uint32x4_t __ret_232; \ + __ret_232 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_232), (uint32x2_t)(vshrn_n_u64(__s1_232, __p2_232)))); \ + __ret_232; \ +}) +#else +#define vshrn_high_n_u64(__p0_233, __p1_233, __p2_233) __extension__ ({ \ + uint32x2_t __s0_233 = __p0_233; \ + uint64x2_t __s1_233 = __p1_233; \ + uint32x2_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 1, 0); \ + uint64x2_t __rev1_233; __rev1_233 = __builtin_shufflevector(__s1_233, __s1_233, 1, 0); \ + uint32x4_t __ret_233; \ + __ret_233 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_233), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_233, __p2_233)))); \ + __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 3, 2, 1, 0); \ + __ret_233; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u16(__p0_234, __p1_234, __p2_234) __extension__ ({ \ + uint8x8_t __s0_234 = __p0_234; \ + uint16x8_t __s1_234 = __p1_234; \ + uint8x16_t __ret_234; \ + __ret_234 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_234), (uint8x8_t)(vshrn_n_u16(__s1_234, __p2_234)))); \ + __ret_234; \ +}) +#else +#define vshrn_high_n_u16(__p0_235, __p1_235, __p2_235) __extension__ ({ \ + uint8x8_t __s0_235 = __p0_235; \ + uint16x8_t __s1_235 = __p1_235; \ + uint8x8_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_235; __rev1_235 = __builtin_shufflevector(__s1_235, __s1_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __ret_235; \ + __ret_235 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_235), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_235, __p2_235)))); \ + __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_235; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s32(__p0_236, __p1_236, __p2_236) __extension__ ({ \ + int16x4_t __s0_236 = __p0_236; \ + int32x4_t __s1_236 = __p1_236; \ + int16x8_t __ret_236; \ + __ret_236 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_236), (int16x4_t)(vshrn_n_s32(__s1_236, __p2_236)))); \ + __ret_236; \ +}) +#else +#define vshrn_high_n_s32(__p0_237, __p1_237, __p2_237) __extension__ ({ \ + int16x4_t __s0_237 = __p0_237; \ + int32x4_t __s1_237 = __p1_237; \ + int16x4_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \ + int32x4_t __rev1_237; __rev1_237 = __builtin_shufflevector(__s1_237, __s1_237, 3, 2, 1, 0); \ + int16x8_t __ret_237; \ + __ret_237 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_237), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_237, __p2_237)))); \ + __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_237; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s64(__p0_238, __p1_238, __p2_238) __extension__ ({ \ + int32x2_t __s0_238 = __p0_238; \ + int64x2_t __s1_238 = __p1_238; \ + int32x4_t __ret_238; \ + __ret_238 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_238), (int32x2_t)(vshrn_n_s64(__s1_238, __p2_238)))); \ + __ret_238; \ +}) 
+#else +#define vshrn_high_n_s64(__p0_239, __p1_239, __p2_239) __extension__ ({ \ + int32x2_t __s0_239 = __p0_239; \ + int64x2_t __s1_239 = __p1_239; \ + int32x2_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \ + int64x2_t __rev1_239; __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \ + int32x4_t __ret_239; \ + __ret_239 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_239), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_239, __p2_239)))); \ + __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \ + __ret_239; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s16(__p0_240, __p1_240, __p2_240) __extension__ ({ \ + int8x8_t __s0_240 = __p0_240; \ + int16x8_t __s1_240 = __p1_240; \ + int8x16_t __ret_240; \ + __ret_240 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_240), (int8x8_t)(vshrn_n_s16(__s1_240, __p2_240)))); \ + __ret_240; \ +}) +#else +#define vshrn_high_n_s16(__p0_241, __p1_241, __p2_241) __extension__ ({ \ + int8x8_t __s0_241 = __p0_241; \ + int16x8_t __s1_241 = __p1_241; \ + int8x8_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_241; __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __ret_241; \ + __ret_241 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_241), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_241, __p2_241)))); \ + __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_241; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#else +#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#else +#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#else +#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); + return __ret; +} +#else +__ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); + return __ret; +} +#else +__ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); + return __ret; +} +#else +__ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = 
(uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __ret; + __ret = (float64x2_t) 
__builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vsqrt_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); + return __ret; +} +#else +__ai float64x1_t vsqrt_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#else +#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#else +#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#else +#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + uint64_t __ret; \ + __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#else +#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + int64_t __ret; \ + __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) 
__builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#else +#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ +}) +#else +#define vst1_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \ +}) +#else +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \ +}) +#else +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ +}) +#else +#define vst1_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ +}) +#else +#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ +}) +#else +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ +}) +#else +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + 
float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ +}) +#else +#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +}) +#else +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#else +#define vst1_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +}) +#else +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +}) +#else +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) +#else +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ 
+}) +#else +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +}) +#else +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +}) +#else +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +}) +#else +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +}) +#else +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \ +}) +#else +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \ +}) +#else +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \ +}) +#else +#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \ +}) +#else +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \ +}) +#else +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \ +}) +#else +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +}) +#else +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + 
uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +}) +#else +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) +#else +#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +}) +#else +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +}) +#else +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \ +}) +#else +#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \ +}) +#else +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \ +}) +#else +#define 
vst1_f16_x2(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \ +}) +#else +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \ +}) +#else +#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \ +}) +#else +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +}) +#else +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#else +#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +}) +#else +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +}) +#else +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +}) +#else +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +}) +#else +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +}) +#else +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t 
__s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) +#else +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) +#else +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ +}) +#else +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \ +}) +#else +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 
41); \ +}) +#else +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \ +}) +#else +#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \ +}) +#else +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \ +}) +#else +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \ +}) +#else +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +}) +#else +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 
6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +}) +#else +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#else +#define vst1_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +}) +#else +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +}) +#else +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \ +}) +#else +#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \ +}) +#else +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + 
float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \ +}) +#else +#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \ +}) +#else +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \ +}) +#else +#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \ +}) +#else +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +}) +#else +#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); 
\ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#else +#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +}) +#else +#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +}) +#else +#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ +}) +#else +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +}) +#else +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ 
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) +#else +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +}) +#else +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) +#else +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) +#else +#define vst1q_u16_x4(__p0, __p1) __extension__ 
({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) +#else +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \ +}) +#else +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \ +}) +#else +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \ +}) +#else +#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ 
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \ +}) +#else +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \ +}) +#else +#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \ +}) +#else +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +}) +#else +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 
(int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +}) +#else +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#else +#define vst1_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +}) +#else +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +}) +#else +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \ +}) +#else +#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x4(__p0, 
__p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \ +}) +#else +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \ +}) +#else +#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \ +}) +#else +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \ +}) +#else +#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \ +}) +#else +#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_p64(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#else +#define 
vst2_p64(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) +#else +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \ +}) +#else +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \ +}) +#else +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_f64(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \ +}) +#else +#define vst2_f64(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ +}) +#else +#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ +}) +#else +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ +}) +#else +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ +}) +#else +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ +}) +#else +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ +}) +#else +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \ +}) +#else +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; 
\ + __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \ +}) +#else +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ +}) +#else +#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \ +}) +#else +#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \ +}) +#else +#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_p64(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#else +#define vst3_p64(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) +#else +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + 
__builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \ +}) +#else +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \ +}) +#else +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_f64(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \ +}) +#else +#define vst3_f64(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ +}) +#else +#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ +}) +#else +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ +}) +#else +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 
(int8x16_t)__rev1.val[2], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ +}) +#else +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ +}) +#else +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ +}) +#else +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \ +}) +#else +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \ +}) +#else +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ +}) +#else +#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \ +}) +#else +#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \ +}) +#else +#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_p64(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#else +#define vst4_p64(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ +}) +#else +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) +#else +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 
0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \ +}) +#else +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \ +}) +#else +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f64(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \ +}) +#else +#define vst4_f64(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ +}) +#else +#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ +}) +#else +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 
__p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ +}) +#else +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ +}) +#else +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ +}) +#else +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ +}) +#else +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \ +}) +#else +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \ +}) +#else +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ +}) +#else +#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \ +}) +#else +#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \ +}) +#else +#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vstrq_p128(__p0, __p1) __extension__ ({ \ + poly128_t __s1 = __p1; \ + __builtin_neon_vstrq_p128(__p0, __s1); \ +}) +#else +#define vstrq_p128(__p0, __p1) __extension__ ({ \ + poly128_t __s1 = __p1; \ + __builtin_neon_vstrq_p128(__p0, __s1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __noswap_vcombine_u8(__rev0, 
__noswap_vsubhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmovl_high_u32(__rev0) - 
__noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __rev0; __rev0 
= __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 - __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 - vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __rev0 - __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = 
__builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai int32x4_t vtrn1q_s32(int32x4_t 
__p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai int8x8_t 
vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 
31); + return __ret; +} +#else +__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = 
__builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t 
vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai uint64x2_t 
vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); + return __ret; +} +#else +__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); + return __ret; +} +#else +__ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); + return __ret; +} +#else +__ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); + return __ret; +} +#else +__ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); + return __ret; +} +#else +__ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t 
__p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 
12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, 
__rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + 
poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + 
__ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { 
+ uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float64x2_t vzip1q_f64(float64x2_t __p0, 
float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai uint32x2_t 
vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + 
__ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = 
__builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + 
uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#ifdef __LITTLE_ENDIAN__ +__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 + vabdq_u8(__p1, __p2); + return __ret; +} +#else +__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __ret; + __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vabdq_u32(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vabdq_u16(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 + vabdq_s8(__p1, __p2); + return __ret; +} +#else +__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __ret; + __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vabdq_s32(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vabdq_s16(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t 
__ret; + __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 + vabd_u8(__p1, __p2); + return __ret; +} +#else +__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __ret; + __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 + vabd_u32(__p1, __p2); + return __ret; +} +#else +__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint32x2_t __ret; + __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 + vabd_u16(__p1, __p2); + return __ret; +} +#else +__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint16x4_t __ret; + __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 + vabd_s8(__p1, __p2); + return __ret; +} +#else +__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __ret; + __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 + vabd_s32(__p1, __p2); + return __ret; +} +#else +__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int32x2_t __ret; + __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x4_t 
vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 + vabd_s16(__p1, __p2); + return __ret; +} +#else +__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int16x4_t __ret; + __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1)))); + return __ret; +} +#else +__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1)))); + return __ret; +} +#else +__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1)))); + return __ret; +} +#else +__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1)))); + return __ret; +} +#else +__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + 
__ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1)))); + return __ret; +} +#else +__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1)))); + return __ret; +} +#else +__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = vmovl_u8(__p0) + vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = vmovl_u32(__p0) + vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = vmovl_u16(__p0) + vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
uint32x4_t __ret; + __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = vmovl_s8(__p0) + vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = vmovl_s32(__p0) + vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = vmovl_s16(__p0) + vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 + vmovl_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + vmovl_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 + vmovl_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = __p0 + vmovl_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = __p0 + vmovl_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = __p0 + vmovl_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_f16(__p0_242, __p1_242) __extension__ ({ \ + float16x4_t __s0_242 = __p0_242; \ + float16_t __ret_242; \ +float16x4_t __reint_242 = __s0_242; \ +int16_t __reint1_242 = vget_lane_s16(*(int16x4_t *) &__reint_242, __p1_242); \ + __ret_242 = *(float16_t *) &__reint1_242; \ + __ret_242; \ +}) +#else +#define vget_lane_f16(__p0_243, __p1_243) __extension__ ({ \ + float16x4_t __s0_243 = __p0_243; \ + float16x4_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \ + float16_t __ret_243; \ +float16x4_t __reint_243 = __rev0_243; \ +int16_t __reint1_243 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_243, __p1_243); \ + __ret_243 = *(float16_t *) &__reint1_243; \ + __ret_243; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f16(__p0_244, __p1_244) __extension__ ({ \ + float16x8_t __s0_244 = __p0_244; \ + float16_t __ret_244; \ +float16x8_t __reint_244 = __s0_244; \ +int16_t __reint1_244 = vgetq_lane_s16(*(int16x8_t *) &__reint_244, __p1_244); \ + __ret_244 = *(float16_t *) &__reint1_244; \ + __ret_244; \ +}) +#else +#define vgetq_lane_f16(__p0_245, __p1_245) __extension__ ({ \ + float16x8_t __s0_245 = __p0_245; \ + float16x8_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16_t __ret_245; \ +float16x8_t __reint_245 = __rev0_245; \ +int16_t __reint1_245 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_245, __p1_245); \ + __ret_245 = *(float16_t *) &__reint1_245; \ + __ret_245; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vmull_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __noswap_vmull_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vmull_u32(__p1, __p2); + return __ret; +} +#else +__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vmull_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vmull_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vmull_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vmull_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __noswap_vmull_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + vmull_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = 
__rev0 + __noswap_vmull_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vmull_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vmull_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vmull_s16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint64x2_t __ret; \ + __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); 
\ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64x2_t __ret; \ + __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, 
int32_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - vmull_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __noswap_vmull_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 - vmull_u32(__p1, __p2); + return __ret; +} +#else +__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 - __noswap_vmull_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - vmull_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __noswap_vmull_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - vmull_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - __noswap_vmull_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 - vmull_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 - __noswap_vmull_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - vmull_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - __noswap_vmull_s16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint64x2_t __ret; \ + __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint64x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __s2 = __p2; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + uint64x2_t __ret; \ + __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t 
__s2 = __p2; \ + uint32x4_t __ret; \ + __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + uint32x4_t __ret; \ + __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __ret; \ + __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + int64x2_t __ret; \ + __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __ret; \ + __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ + __ret; \ +}) +#else +#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + int32x4_t __ret; \ + __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __ret; + __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 - vmull_u16(__p1, 
(uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __ret; + __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_f16(__p0_246, __p1_246, __p2_246) __extension__ ({ \ + float16_t __s0_246 = __p0_246; \ + float16x4_t __s1_246 = __p1_246; \ + float16x4_t __ret_246; \ +float16_t __reint_246 = __s0_246; \ +float16x4_t __reint1_246 = __s1_246; \ +int16x4_t __reint2_246 = vset_lane_s16(*(int16_t *) &__reint_246, *(int16x4_t *) &__reint1_246, __p2_246); \ + __ret_246 = *(float16x4_t *) &__reint2_246; \ + __ret_246; \ +}) +#else +#define vset_lane_f16(__p0_247, __p1_247, __p2_247) __extension__ ({ \ + float16_t __s0_247 = __p0_247; \ + float16x4_t __s1_247 = __p1_247; \ + float16x4_t __rev1_247; __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 3, 2, 1, 0); \ + float16x4_t __ret_247; \ +float16_t __reint_247 = __s0_247; \ +float16x4_t __reint1_247 = __rev1_247; \ +int16x4_t __reint2_247 = __noswap_vset_lane_s16(*(int16_t *) &__reint_247, *(int16x4_t *) &__reint1_247, __p2_247); \ + __ret_247 = *(float16x4_t *) &__reint2_247; \ + __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 3, 2, 1, 0); \ + __ret_247; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f16(__p0_248, __p1_248, __p2_248) __extension__ ({ \ + float16_t __s0_248 = 
__p0_248; \ + float16x8_t __s1_248 = __p1_248; \ + float16x8_t __ret_248; \ +float16_t __reint_248 = __s0_248; \ +float16x8_t __reint1_248 = __s1_248; \ +int16x8_t __reint2_248 = vsetq_lane_s16(*(int16_t *) &__reint_248, *(int16x8_t *) &__reint1_248, __p2_248); \ + __ret_248 = *(float16x8_t *) &__reint2_248; \ + __ret_248; \ +}) +#else +#define vsetq_lane_f16(__p0_249, __p1_249, __p2_249) __extension__ ({ \ + float16_t __s0_249 = __p0_249; \ + float16x8_t __s1_249 = __p1_249; \ + float16x8_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __ret_249; \ +float16_t __reint_249 = __s0_249; \ +float16x8_t __reint1_249 = __rev1_249; \ +int16x8_t __reint2_249 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_249, *(int16x8_t *) &__reint1_249, __p2_249); \ + __ret_249 = *(float16x8_t *) &__reint2_249; \ + __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_249; \ +}) +#endif + +#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = __noswap_vqadds_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2)); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = __noswap_vqaddh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2)); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_lane_s32(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \ + int32_t __s0_250 = __p0_250; \ + int32_t __s1_250 = __p1_250; \ + int32x2_t __s2_250 = __p2_250; \ + int32_t __ret_250; \ + __ret_250 = vqadds_s32(__s0_250, vqrdmulhs_s32(__s1_250, vget_lane_s32(__s2_250, __p3_250))); \ + __ret_250; \ +}) +#else +#define vqrdmlahs_lane_s32(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \ + int32_t __s0_251 = __p0_251; \ + int32_t __s1_251 = __p1_251; \ + int32x2_t __s2_251 = __p2_251; \ + int32x2_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 1, 0); \ + int32_t __ret_251; \ + __ret_251 = __noswap_vqadds_s32(__s0_251, __noswap_vqrdmulhs_s32(__s1_251, __noswap_vget_lane_s32(__rev2_251, __p3_251))); \ + __ret_251; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_lane_s16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \ + int16_t __s0_252 = __p0_252; \ + int16_t __s1_252 = __p1_252; \ + int16x4_t __s2_252 = __p2_252; \ + int16_t __ret_252; \ + __ret_252 = vqaddh_s16(__s0_252, vqrdmulhh_s16(__s1_252, vget_lane_s16(__s2_252, __p3_252))); \ + __ret_252; \ +}) +#else +#define vqrdmlahh_lane_s16(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \ + int16_t __s0_253 = __p0_253; \ + int16_t __s1_253 = __p1_253; \ + int16x4_t __s2_253 = __p2_253; \ + int16x4_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 3, 2, 1, 0); \ + int16_t __ret_253; \ + __ret_253 = __noswap_vqaddh_s16(__s0_253, __noswap_vqrdmulhh_s16(__s1_253, __noswap_vget_lane_s16(__rev2_253, __p3_253))); \ + __ret_253; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_laneq_s32(__p0_254, __p1_254, __p2_254, __p3_254) 
__extension__ ({ \ + int32_t __s0_254 = __p0_254; \ + int32_t __s1_254 = __p1_254; \ + int32x4_t __s2_254 = __p2_254; \ + int32_t __ret_254; \ + __ret_254 = vqadds_s32(__s0_254, vqrdmulhs_s32(__s1_254, vgetq_lane_s32(__s2_254, __p3_254))); \ + __ret_254; \ +}) +#else +#define vqrdmlahs_laneq_s32(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \ + int32_t __s0_255 = __p0_255; \ + int32_t __s1_255 = __p1_255; \ + int32x4_t __s2_255 = __p2_255; \ + int32x4_t __rev2_255; __rev2_255 = __builtin_shufflevector(__s2_255, __s2_255, 3, 2, 1, 0); \ + int32_t __ret_255; \ + __ret_255 = __noswap_vqadds_s32(__s0_255, __noswap_vqrdmulhs_s32(__s1_255, __noswap_vgetq_lane_s32(__rev2_255, __p3_255))); \ + __ret_255; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_laneq_s16(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \ + int16_t __s0_256 = __p0_256; \ + int16_t __s1_256 = __p1_256; \ + int16x8_t __s2_256 = __p2_256; \ + int16_t __ret_256; \ + __ret_256 = vqaddh_s16(__s0_256, vqrdmulhh_s16(__s1_256, vgetq_lane_s16(__s2_256, __p3_256))); \ + __ret_256; \ +}) +#else +#define vqrdmlahh_laneq_s16(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ + int16_t __s0_257 = __p0_257; \ + int16_t __s1_257 = __p1_257; \ + int16x8_t __s2_257 = __p2_257; \ + int16x8_t __rev2_257; __rev2_257 = __builtin_shufflevector(__s2_257, __s2_257, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret_257; \ + __ret_257 = __noswap_vqaddh_s16(__s0_257, __noswap_vqrdmulhh_s16(__s1_257, __noswap_vgetq_lane_s16(__rev2_257, __p3_257))); \ + __ret_257; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2)); + return __ret; +} +#else +__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = __noswap_vqsubs_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2)); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2)); + return __ret; +} +#else +__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = __noswap_vqsubh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2)); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_lane_s32(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \ + int32_t __s0_258 = __p0_258; \ + int32_t __s1_258 = __p1_258; \ + int32x2_t __s2_258 = __p2_258; \ + int32_t __ret_258; \ + __ret_258 = vqsubs_s32(__s0_258, vqrdmulhs_s32(__s1_258, vget_lane_s32(__s2_258, __p3_258))); \ + __ret_258; \ +}) +#else +#define vqrdmlshs_lane_s32(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \ + int32_t __s0_259 = __p0_259; \ + int32_t __s1_259 = __p1_259; \ + int32x2_t __s2_259 = __p2_259; \ + int32x2_t __rev2_259; __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 1, 0); \ + int32_t __ret_259; \ + __ret_259 = __noswap_vqsubs_s32(__s0_259, __noswap_vqrdmulhs_s32(__s1_259, __noswap_vget_lane_s32(__rev2_259, __p3_259))); \ + __ret_259; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_lane_s16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \ + int16_t __s0_260 = __p0_260; \ + int16_t __s1_260 = __p1_260; \ + int16x4_t __s2_260 = __p2_260; \ + int16_t __ret_260; \ + __ret_260 = vqsubh_s16(__s0_260, vqrdmulhh_s16(__s1_260, vget_lane_s16(__s2_260, __p3_260))); \ + __ret_260; \ +}) +#else +#define 
vqrdmlshh_lane_s16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \ + int16_t __s0_261 = __p0_261; \ + int16_t __s1_261 = __p1_261; \ + int16x4_t __s2_261 = __p2_261; \ + int16x4_t __rev2_261; __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 3, 2, 1, 0); \ + int16_t __ret_261; \ + __ret_261 = __noswap_vqsubh_s16(__s0_261, __noswap_vqrdmulhh_s16(__s1_261, __noswap_vget_lane_s16(__rev2_261, __p3_261))); \ + __ret_261; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_laneq_s32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \ + int32_t __s0_262 = __p0_262; \ + int32_t __s1_262 = __p1_262; \ + int32x4_t __s2_262 = __p2_262; \ + int32_t __ret_262; \ + __ret_262 = vqsubs_s32(__s0_262, vqrdmulhs_s32(__s1_262, vgetq_lane_s32(__s2_262, __p3_262))); \ + __ret_262; \ +}) +#else +#define vqrdmlshs_laneq_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \ + int32_t __s0_263 = __p0_263; \ + int32_t __s1_263 = __p1_263; \ + int32x4_t __s2_263 = __p2_263; \ + int32x4_t __rev2_263; __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 3, 2, 1, 0); \ + int32_t __ret_263; \ + __ret_263 = __noswap_vqsubs_s32(__s0_263, __noswap_vqrdmulhs_s32(__s1_263, __noswap_vgetq_lane_s32(__rev2_263, __p3_263))); \ + __ret_263; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_laneq_s16(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \ + int16_t __s0_264 = __p0_264; \ + int16_t __s1_264 = __p1_264; \ + int16x8_t __s2_264 = __p2_264; \ + int16_t __ret_264; \ + __ret_264 = vqsubh_s16(__s0_264, vqrdmulhh_s16(__s1_264, vgetq_lane_s16(__s2_264, __p3_264))); \ + __ret_264; \ +}) +#else +#define vqrdmlshh_laneq_s16(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \ + int16_t __s0_265 = __p0_265; \ + int16_t __s1_265 = __p1_265; \ + int16x8_t __s2_265 = __p2_265; \ + int16x8_t __rev2_265; __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16_t __ret_265; \ + __ret_265 = __noswap_vqsubh_s16(__s0_265, __noswap_vqrdmulhh_s16(__s1_265, __noswap_vgetq_lane_s16(__rev2_265, __p3_265))); \ + __ret_265; \ +}) +#endif + +#endif +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} +#else +__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1)); + return __ret; +} +#else +__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t 
vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + 
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 + vmovl_high_u8(__p1); + return __ret; +} +#else +__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t 
vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 + vmovl_high_u32(__p1); + return __ret; +} +#else +__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 + vmovl_high_u16(__p1); + return __ret; +} +#else +__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 + vmovl_high_s8(__p1); + return __ret; +} +#else +__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 + vmovl_high_s32(__p1); + return __ret; +} +#else +__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 + vmovl_high_s16(__p1); + return __ret; +} +#else +__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p64(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \ + poly64x2_t __s0_266 = __p0_266; \ + poly64x1_t __s2_266 = __p2_266; \ + poly64x2_t __ret_266; \ + __ret_266 = vsetq_lane_p64(vget_lane_p64(__s2_266, __p3_266), __s0_266, __p1_266); \ + __ret_266; \ +}) +#else +#define vcopyq_lane_p64(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \ + poly64x2_t __s0_267 = __p0_267; \ + poly64x1_t __s2_267 = __p2_267; \ + poly64x2_t __rev0_267; __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 1, 0); \ + poly64x2_t __ret_267; \ + __ret_267 = __noswap_vsetq_lane_p64(__noswap_vget_lane_p64(__s2_267, __p3_267), __rev0_267, __p1_267); \ + __ret_267 = 
__builtin_shufflevector(__ret_267, __ret_267, 1, 0); \ + __ret_267; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_f64(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \ + float64x2_t __s0_268 = __p0_268; \ + float64x1_t __s2_268 = __p2_268; \ + float64x2_t __ret_268; \ + __ret_268 = vsetq_lane_f64(vget_lane_f64(__s2_268, __p3_268), __s0_268, __p1_268); \ + __ret_268; \ +}) +#else +#define vcopyq_lane_f64(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \ + float64x2_t __s0_269 = __p0_269; \ + float64x1_t __s2_269 = __p2_269; \ + float64x2_t __rev0_269; __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 1, 0); \ + float64x2_t __ret_269; \ + __ret_269 = __noswap_vsetq_lane_f64(__noswap_vget_lane_f64(__s2_269, __p3_269), __rev0_269, __p1_269); \ + __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 1, 0); \ + __ret_269; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p64(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ + poly64x1_t __s0_270 = __p0_270; \ + poly64x1_t __s2_270 = __p2_270; \ + poly64x1_t __ret_270; \ + __ret_270 = vset_lane_p64(vget_lane_p64(__s2_270, __p3_270), __s0_270, __p1_270); \ + __ret_270; \ +}) +#else +#define vcopy_lane_p64(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ + poly64x1_t __s0_271 = __p0_271; \ + poly64x1_t __s2_271 = __p2_271; \ + poly64x1_t __ret_271; \ + __ret_271 = __noswap_vset_lane_p64(__noswap_vget_lane_p64(__s2_271, __p3_271), __s0_271, __p1_271); \ + __ret_271; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_f64(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ + float64x1_t __s0_272 = __p0_272; \ + float64x1_t __s2_272 = __p2_272; \ + float64x1_t __ret_272; \ + __ret_272 = vset_lane_f64(vget_lane_f64(__s2_272, __p3_272), __s0_272, __p1_272); \ + __ret_272; \ +}) +#else +#define vcopy_lane_f64(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ + float64x1_t __s0_273 = __p0_273; \ + float64x1_t __s2_273 = __p2_273; \ + float64x1_t __ret_273; \ + __ret_273 = __noswap_vset_lane_f64(__noswap_vget_lane_f64(__s2_273, __p3_273), __s0_273, __p1_273); \ + __ret_273; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p64(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ + poly64x2_t __s0_274 = __p0_274; \ + poly64x2_t __s2_274 = __p2_274; \ + poly64x2_t __ret_274; \ + __ret_274 = vsetq_lane_p64(vgetq_lane_p64(__s2_274, __p3_274), __s0_274, __p1_274); \ + __ret_274; \ +}) +#else +#define vcopyq_laneq_p64(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ + poly64x2_t __s0_275 = __p0_275; \ + poly64x2_t __s2_275 = __p2_275; \ + poly64x2_t __rev0_275; __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 1, 0); \ + poly64x2_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 1, 0); \ + poly64x2_t __ret_275; \ + __ret_275 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_275, __p3_275), __rev0_275, __p1_275); \ + __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 1, 0); \ + __ret_275; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_f64(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ + float64x2_t __s0_276 = __p0_276; \ + float64x2_t __s2_276 = __p2_276; \ + float64x2_t __ret_276; \ + __ret_276 = vsetq_lane_f64(vgetq_lane_f64(__s2_276, __p3_276), __s0_276, __p1_276); \ + __ret_276; \ +}) +#else +#define vcopyq_laneq_f64(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ + float64x2_t __s0_277 = __p0_277; \ + float64x2_t __s2_277 = __p2_277; \ + 
float64x2_t __rev0_277; __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \ + float64x2_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 1, 0); \ + float64x2_t __ret_277; \ + __ret_277 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_277, __p3_277), __rev0_277, __p1_277); \ + __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \ + __ret_277; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p64(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ + poly64x1_t __s0_278 = __p0_278; \ + poly64x2_t __s2_278 = __p2_278; \ + poly64x1_t __ret_278; \ + __ret_278 = vset_lane_p64(vgetq_lane_p64(__s2_278, __p3_278), __s0_278, __p1_278); \ + __ret_278; \ +}) +#else +#define vcopy_laneq_p64(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ + poly64x1_t __s0_279 = __p0_279; \ + poly64x2_t __s2_279 = __p2_279; \ + poly64x2_t __rev2_279; __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 1, 0); \ + poly64x1_t __ret_279; \ + __ret_279 = __noswap_vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_279, __p3_279), __s0_279, __p1_279); \ + __ret_279; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_f64(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ + float64x1_t __s0_280 = __p0_280; \ + float64x2_t __s2_280 = __p2_280; \ + float64x1_t __ret_280; \ + __ret_280 = vset_lane_f64(vgetq_lane_f64(__s2_280, __p3_280), __s0_280, __p1_280); \ + __ret_280; \ +}) +#else +#define vcopy_laneq_f64(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ + float64x1_t __s0_281 = __p0_281; \ + float64x2_t __s2_281 = __p2_281; \ + float64x2_t __rev2_281; __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 1, 0); \ + float64x1_t __ret_281; \ + __ret_281 = __noswap_vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_281, __p3_281), __s0_281, __p1_281); \ + __ret_281; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t 
vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2); + return __ret; +} +#else +__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
uint64x2_t __ret; + __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2); + return __ret; +} +#else +__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = 
__noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2); + return __ret; +} +#else +__ai uint64x2_t 
vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2); + return __ret; +} +#else +__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f64(__p0_282, __p1_282, __p2_282) __extension__ ({ \ + float64x1_t __s0_282 = __p0_282; \ + float64x1_t __s1_282 = __p1_282; \ + float64x1_t __ret_282; \ + float64_t __x_282 = vget_lane_f64(__s0_282, 0); \ + float64_t __y_282 = vget_lane_f64(__s1_282, __p2_282); \ + float64_t __z_282 = vmulxd_f64(__x_282, __y_282); \ + __ret_282 = vset_lane_f64(__z_282, __s0_282, __p2_282); \ + __ret_282; \ +}) +#else +#define vmulx_lane_f64(__p0_283, __p1_283, __p2_283) __extension__ ({ \ + float64x1_t __s0_283 = __p0_283; \ + float64x1_t __s1_283 = __p1_283; \ + float64x1_t __ret_283; \ + float64_t __x_283 = __noswap_vget_lane_f64(__s0_283, 0); \ + float64_t __y_283 = __noswap_vget_lane_f64(__s1_283, __p2_283); \ + float64_t __z_283 = __noswap_vmulxd_f64(__x_283, __y_283); \ + __ret_283 = __noswap_vset_lane_f64(__z_283, __s0_283, __p2_283); \ + __ret_283; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f64(__p0_284, __p1_284, __p2_284) __extension__ ({ \ + float64x1_t __s0_284 = __p0_284; \ + float64x2_t __s1_284 = __p1_284; \ + float64x1_t __ret_284; \ + float64_t __x_284 = vget_lane_f64(__s0_284, 0); \ + float64_t __y_284 = vgetq_lane_f64(__s1_284, __p2_284); \ + 
float64_t __z_284 = vmulxd_f64(__x_284, __y_284); \ + __ret_284 = vset_lane_f64(__z_284, __s0_284, 0); \ + __ret_284; \ +}) +#else +#define vmulx_laneq_f64(__p0_285, __p1_285, __p2_285) __extension__ ({ \ + float64x1_t __s0_285 = __p0_285; \ + float64x2_t __s1_285 = __p1_285; \ + float64x2_t __rev1_285; __rev1_285 = __builtin_shufflevector(__s1_285, __s1_285, 1, 0); \ + float64x1_t __ret_285; \ + float64_t __x_285 = __noswap_vget_lane_f64(__s0_285, 0); \ + float64_t __y_285 = __noswap_vgetq_lane_f64(__rev1_285, __p2_285); \ + float64_t __z_285 = __noswap_vmulxd_f64(__x_285, __y_285); \ + __ret_285 = __noswap_vset_lane_f64(__z_285, __s0_285, 0); \ + __ret_285; \ +}) +#endif + +#endif +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vabdl_u8(__p1, __p2); + return __ret; +} +#else +__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __noswap_vabdl_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vabdl_u32(__p1, __p2); + return __ret; +} +#else +__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + uint64x2_t __ret; + __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vabdl_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vabdl_u16(__p1, __p2); + return __ret; +} +#else +__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vabdl_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vabdl_s8(__p1, __p2); + return __ret; +} +#else +__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __noswap_vabdl_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + vabdl_s32(__p1, __p2); + return __ret; +} +#else +__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + int64x2_t __ret; + __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vabdl_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vabdl_s16(__p1, __p2); + return __ret; +} +#else +__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vabdl_s16(__p1, __p2); + return __ret; +} +#endif + +#if defined(__aarch64__) +#ifdef __LITTLE_ENDIAN__ +__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __ret; + __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + uint64x2_t __ret; + __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + uint32x4_t __ret; + __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __ret; + __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + int64x2_t __ret; + __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + int32x4_t __ret; + __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif + +#undef __ai + +#endif /* __ARM_NEON_H */ diff --git a/c_headers/armintr.h b/c_headers/armintr.h new 
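A minimal usage sketch for the AArch64-only vabal_high_* wrappers defined above (illustrative only; the caller name is invented): vabal_high_u8 widens the upper eight lanes of each uint8x16_t, takes absolute differences, and accumulates into a uint16x8_t, and on big-endian targets the #else paths reverse lane order with __builtin_shufflevector before and after the __noswap_* helpers so the result matches the little-endian definition.

#include <arm_neon.h>

/* Illustrative caller: acc[i] += |a[i + 8] - b[i + 8]| for i = 0..7. */
static uint16x8_t accumulate_high_abs_diff(uint16x8_t acc, uint8x16_t a, uint8x16_t b)
{
    return vabal_high_u8(acc, a, b);
}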
file mode 100644 index 000000000..933afcbb9 --- /dev/null +++ b/c_headers/armintr.h @@ -0,0 +1,45 @@ +/*===---- armintr.h - ARM Windows intrinsics -------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +/* Only include this if we're compiling for the windows platform. */ +#ifndef _MSC_VER +#include_next <armintr.h> +#else + +#ifndef __ARMINTR_H +#define __ARMINTR_H + +typedef enum +{ + _ARM_BARRIER_SY = 0xF, + _ARM_BARRIER_ST = 0xE, + _ARM_BARRIER_ISH = 0xB, + _ARM_BARRIER_ISHST = 0xA, + _ARM_BARRIER_NSH = 0x7, + _ARM_BARRIER_NSHST = 0x6, + _ARM_BARRIER_OSH = 0x3, + _ARM_BARRIER_OSHST = 0x2 +} _ARMINTR_BARRIER_TYPE; + +#endif /* __ARMINTR_H */ +#endif /* _MSC_VER */ diff --git a/c_headers/avx2intrin.h b/c_headers/avx2intrin.h index d8b6b0aa4..13bcbef4d 100644 --- a/c_headers/avx2intrin.h +++ b/c_headers/avx2intrin.h @@ -29,10 +29,12 @@ #define __AVX2INTRIN_H /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx2")))
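The first avx2intrin.h hunk above adds __target__("avx2") to __DEFAULT_FN_ATTRS, so each intrinsic carries its own target requirement instead of relying on the whole translation unit being built with -mavx2. A minimal sketch of what that enables under clang (the wrapper name is invented for illustration):

#include <immintrin.h>

/* Opt this one function into AVX2 with a matching target attribute; the rest
   of the file can be compiled without -mavx2. _mm256_add_epi8 wraps modulo
   256, and the hunks below switch it to unsigned element types (__v32qu) so
   the wraparound is not signed-overflow undefined behavior. */
__attribute__((__target__("avx2")))
static __m256i add_bytes_avx2(__m256i a, __m256i b)
{
    return _mm256_add_epi8(a, b);
}

 /* SSE4 Multiple Packed Sums of Absolute Difference.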
*/ -#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M)) +#define _mm256_mpsadbw_epu8(X, Y, M) \ + (__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \ + (__v32qi)(__m256i)(Y), (int)(M)) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_abs_epi8(__m256i __a) @@ -79,25 +81,25 @@ _mm256_packus_epi32(__m256i __V1, __m256i __V2) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_add_epi8(__m256i __a, __m256i __b) { - return (__m256i)((__v32qi)__a + (__v32qi)__b); + return (__m256i)((__v32qu)__a + (__v32qu)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_add_epi16(__m256i __a, __m256i __b) { - return (__m256i)((__v16hi)__a + (__v16hi)__b); + return (__m256i)((__v16hu)__a + (__v16hu)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_add_epi32(__m256i __a, __m256i __b) { - return (__m256i)((__v8si)__a + (__v8si)__b); + return (__m256i)((__v8su)__a + (__v8su)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_add_epi64(__m256i __a, __m256i __b) { - return __a + __b; + return (__m256i)((__v4du)__a + (__v4du)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -124,21 +126,20 @@ _mm256_adds_epu16(__m256i __a, __m256i __b) return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b); } -#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \ - __m256i __a = (a); \ - __m256i __b = (b); \ - (__m256i)__builtin_ia32_palignr256((__v32qi)__a, (__v32qi)__b, (n)); }) +#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \ + (__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), (n)); }) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_and_si256(__m256i __a, __m256i __b) { - return __a & __b; + return (__m256i)((__v4du)__a & (__v4du)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_andnot_si256(__m256i __a, __m256i __b) { - return ~__a & __b; + return (__m256i)(~(__v4du)__a & (__v4du)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -160,20 +161,19 @@ _mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M) (__v32qi)__M); } -#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \ - __m256i __V1 = (V1); \ - __m256i __V2 = (V2); \ - (__m256i)__builtin_shufflevector((__v16hi)__V1, (__v16hi)__V2, \ - (((M) & 0x01) ? 16 : 0), \ - (((M) & 0x02) ? 17 : 1), \ - (((M) & 0x04) ? 18 : 2), \ - (((M) & 0x08) ? 19 : 3), \ - (((M) & 0x10) ? 20 : 4), \ - (((M) & 0x20) ? 21 : 5), \ - (((M) & 0x40) ? 22 : 6), \ - (((M) & 0x80) ? 23 : 7), \ - (((M) & 0x01) ? 24 : 8), \ - (((M) & 0x02) ? 25 : 9), \ +#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \ + (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(V1), \ + (__v16hi)(__m256i)(V2), \ + (((M) & 0x01) ? 16 : 0), \ + (((M) & 0x02) ? 17 : 1), \ + (((M) & 0x04) ? 18 : 2), \ + (((M) & 0x08) ? 19 : 3), \ + (((M) & 0x10) ? 20 : 4), \ + (((M) & 0x20) ? 21 : 5), \ + (((M) & 0x40) ? 22 : 6), \ + (((M) & 0x80) ? 23 : 7), \ + (((M) & 0x01) ? 24 : 8), \ + (((M) & 0x02) ? 25 : 9), \ (((M) & 0x04) ? 26 : 10), \ (((M) & 0x08) ? 27 : 11), \ (((M) & 0x10) ? 
28 : 12), \ @@ -202,13 +202,15 @@ _mm256_cmpeq_epi32(__m256i __a, __m256i __b) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cmpeq_epi64(__m256i __a, __m256i __b) { - return (__m256i)(__a == __b); + return (__m256i)((__v4di)__a == (__v4di)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cmpgt_epi8(__m256i __a, __m256i __b) { - return (__m256i)((__v32qi)__a > (__v32qi)__b); + /* This function always performs a signed comparison, but __v32qi is a char + which may be signed or unsigned, so use __v32qs. */ + return (__m256i)((__v32qs)__a > (__v32qs)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -226,7 +228,7 @@ _mm256_cmpgt_epi32(__m256i __a, __m256i __b) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cmpgt_epi64(__m256i __a, __m256i __b) { - return (__m256i)(__a > __b); + return (__m256i)((__v4di)__a > (__v4di)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -358,73 +360,79 @@ _mm256_movemask_epi8(__m256i __a) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepi8_epi16(__m128i __V) { - return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__V); + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepi8_epi32(__m128i __V) { - return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__V); + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepi8_epi64(__m128i __V) { - return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__V); + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. 
*/ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepi16_epi32(__m128i __V) { - return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__V); + return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepi16_epi64(__m128i __V) { - return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__V); + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepi32_epi64(__m128i __V) { - return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__V); + return (__m256i)__builtin_convertvector((__v4si)__V, __v4di); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepu8_epi16(__m128i __V) { - return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__V); + return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepu8_epi32(__m128i __V) { - return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__V); + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepu8_epi64(__m128i __V) { - return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__V); + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepu16_epi32(__m128i __V) { - return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__V); + return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepu16_epi64(__m128i __V) { - return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__V); + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cvtepu32_epi64(__m128i __V) { - return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__V); + return (__m256i)__builtin_convertvector((__v4su)__V, __v4di); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -454,13 +462,13 @@ _mm256_mulhi_epi16(__m256i __a, __m256i __b) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_mullo_epi16(__m256i __a, __m256i __b) { - return (__m256i)((__v16hi)__a * (__v16hi)__b); + return (__m256i)((__v16hu)__a * (__v16hu)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_mullo_epi32 (__m256i __a, __m256i __b) { - return (__m256i)((__v8si)__a * (__v8si)__b); + return (__m256i)((__v8su)__a * (__v8su)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -472,7 +480,7 @@ _mm256_mul_epu32(__m256i __a, __m256i __b) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_or_si256(__m256i __a, __m256i __b) { - return __a | __b; + return (__m256i)((__v4du)__a | (__v4du)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -488,39 +496,43 @@ _mm256_shuffle_epi8(__m256i __a, __m256i __b) } #define _mm256_shuffle_epi32(a, imm) __extension__ ({ \ - __m256i __a = (a); \ - (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)_mm256_set1_epi32(0), \ - (imm) & 0x3, ((imm) & 0xc) >> 2, \ - ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \ - 4 + (((imm) & 0x03) >> 0), \ - 4 + (((imm) & 0x0c) >> 2), \ - 4 + (((imm) & 0x30) >> 4), \ - 4 + (((imm) & 0xc0) >> 6)); }) + (__m256i)__builtin_shufflevector((__v8si)(__m256i)(a), \ + (__v8si)_mm256_undefined_si256(), \ + 0 + (((imm) 
>> 0) & 0x3), \ + 0 + (((imm) >> 2) & 0x3), \ + 0 + (((imm) >> 4) & 0x3), \ + 0 + (((imm) >> 6) & 0x3), \ + 4 + (((imm) >> 0) & 0x3), \ + 4 + (((imm) >> 2) & 0x3), \ + 4 + (((imm) >> 4) & 0x3), \ + 4 + (((imm) >> 6) & 0x3)); }) #define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \ - __m256i __a = (a); \ - (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \ + (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \ + (__v16hi)_mm256_undefined_si256(), \ 0, 1, 2, 3, \ - 4 + (((imm) & 0x03) >> 0), \ - 4 + (((imm) & 0x0c) >> 2), \ - 4 + (((imm) & 0x30) >> 4), \ - 4 + (((imm) & 0xc0) >> 6), \ + 4 + (((imm) >> 0) & 0x3), \ + 4 + (((imm) >> 2) & 0x3), \ + 4 + (((imm) >> 4) & 0x3), \ + 4 + (((imm) >> 6) & 0x3), \ 8, 9, 10, 11, \ - 12 + (((imm) & 0x03) >> 0), \ - 12 + (((imm) & 0x0c) >> 2), \ - 12 + (((imm) & 0x30) >> 4), \ - 12 + (((imm) & 0xc0) >> 6)); }) + 12 + (((imm) >> 0) & 0x3), \ + 12 + (((imm) >> 2) & 0x3), \ + 12 + (((imm) >> 4) & 0x3), \ + 12 + (((imm) >> 6) & 0x3)); }) #define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \ - __m256i __a = (a); \ - (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \ - (imm) & 0x3,((imm) & 0xc) >> 2, \ - ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \ + (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \ + (__v16hi)_mm256_undefined_si256(), \ + 0 + (((imm) >> 0) & 0x3), \ + 0 + (((imm) >> 2) & 0x3), \ + 0 + (((imm) >> 4) & 0x3), \ + 0 + (((imm) >> 6) & 0x3), \ 4, 5, 6, 7, \ - 8 + (((imm) & 0x03) >> 0), \ - 8 + (((imm) & 0x0c) >> 2), \ - 8 + (((imm) & 0x30) >> 4), \ - 8 + (((imm) & 0xc0) >> 6), \ + 8 + (((imm) >> 0) & 0x3), \ + 8 + (((imm) >> 2) & 0x3), \ + 8 + (((imm) >> 4) & 0x3), \ + 8 + (((imm) >> 6) & 0x3), \ 12, 13, 14, 15); }) static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -541,9 +553,42 @@ _mm256_sign_epi32(__m256i __a, __m256i __b) return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b); } -#define _mm256_slli_si256(a, count) __extension__ ({ \ - __m256i __a = (a); \ - (__m256i)__builtin_ia32_pslldqi256(__a, (count)*8); }) +#define _mm256_slli_si256(a, imm) __extension__ ({ \ + (__m256i)__builtin_shufflevector( \ + (__v32qi)_mm256_setzero_si256(), \ + (__v32qi)(__m256i)(a), \ + ((char)(imm)&0xF0) ? 0 : ((char)(imm)>0x0 ? 16 : 32) - (char)(imm), \ + ((char)(imm)&0xF0) ? 1 : ((char)(imm)>0x1 ? 17 : 33) - (char)(imm), \ + ((char)(imm)&0xF0) ? 2 : ((char)(imm)>0x2 ? 18 : 34) - (char)(imm), \ + ((char)(imm)&0xF0) ? 3 : ((char)(imm)>0x3 ? 19 : 35) - (char)(imm), \ + ((char)(imm)&0xF0) ? 4 : ((char)(imm)>0x4 ? 20 : 36) - (char)(imm), \ + ((char)(imm)&0xF0) ? 5 : ((char)(imm)>0x5 ? 21 : 37) - (char)(imm), \ + ((char)(imm)&0xF0) ? 6 : ((char)(imm)>0x6 ? 22 : 38) - (char)(imm), \ + ((char)(imm)&0xF0) ? 7 : ((char)(imm)>0x7 ? 23 : 39) - (char)(imm), \ + ((char)(imm)&0xF0) ? 8 : ((char)(imm)>0x8 ? 24 : 40) - (char)(imm), \ + ((char)(imm)&0xF0) ? 9 : ((char)(imm)>0x9 ? 25 : 41) - (char)(imm), \ + ((char)(imm)&0xF0) ? 10 : ((char)(imm)>0xA ? 26 : 42) - (char)(imm), \ + ((char)(imm)&0xF0) ? 11 : ((char)(imm)>0xB ? 27 : 43) - (char)(imm), \ + ((char)(imm)&0xF0) ? 12 : ((char)(imm)>0xC ? 28 : 44) - (char)(imm), \ + ((char)(imm)&0xF0) ? 13 : ((char)(imm)>0xD ? 29 : 45) - (char)(imm), \ + ((char)(imm)&0xF0) ? 14 : ((char)(imm)>0xE ? 30 : 46) - (char)(imm), \ + ((char)(imm)&0xF0) ? 15 : ((char)(imm)>0xF ? 31 : 47) - (char)(imm), \ + ((char)(imm)&0xF0) ? 16 : ((char)(imm)>0x0 ? 32 : 48) - (char)(imm), \ + ((char)(imm)&0xF0) ? 17 : ((char)(imm)>0x1 ? 
33 : 49) - (char)(imm), \ + ((char)(imm)&0xF0) ? 18 : ((char)(imm)>0x2 ? 34 : 50) - (char)(imm), \ + ((char)(imm)&0xF0) ? 19 : ((char)(imm)>0x3 ? 35 : 51) - (char)(imm), \ + ((char)(imm)&0xF0) ? 20 : ((char)(imm)>0x4 ? 36 : 52) - (char)(imm), \ + ((char)(imm)&0xF0) ? 21 : ((char)(imm)>0x5 ? 37 : 53) - (char)(imm), \ + ((char)(imm)&0xF0) ? 22 : ((char)(imm)>0x6 ? 38 : 54) - (char)(imm), \ + ((char)(imm)&0xF0) ? 23 : ((char)(imm)>0x7 ? 39 : 55) - (char)(imm), \ + ((char)(imm)&0xF0) ? 24 : ((char)(imm)>0x8 ? 40 : 56) - (char)(imm), \ + ((char)(imm)&0xF0) ? 25 : ((char)(imm)>0x9 ? 41 : 57) - (char)(imm), \ + ((char)(imm)&0xF0) ? 26 : ((char)(imm)>0xA ? 42 : 58) - (char)(imm), \ + ((char)(imm)&0xF0) ? 27 : ((char)(imm)>0xB ? 43 : 59) - (char)(imm), \ + ((char)(imm)&0xF0) ? 28 : ((char)(imm)>0xC ? 44 : 60) - (char)(imm), \ + ((char)(imm)&0xF0) ? 29 : ((char)(imm)>0xD ? 45 : 61) - (char)(imm), \ + ((char)(imm)&0xF0) ? 30 : ((char)(imm)>0xE ? 46 : 62) - (char)(imm), \ + ((char)(imm)&0xF0) ? 31 : ((char)(imm)>0xF ? 47 : 63) - (char)(imm)); }) #define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count)) @@ -574,13 +619,13 @@ _mm256_sll_epi32(__m256i __a, __m128i __count) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_slli_epi64(__m256i __a, int __count) { - return __builtin_ia32_psllqi256(__a, __count); + return __builtin_ia32_psllqi256((__v4di)__a, __count); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_sll_epi64(__m256i __a, __m128i __count) { - return __builtin_ia32_psllq256(__a, __count); + return __builtin_ia32_psllq256((__v4di)__a, __count); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -607,9 +652,42 @@ _mm256_sra_epi32(__m256i __a, __m128i __count) return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count); } -#define _mm256_srli_si256(a, count) __extension__ ({ \ - __m256i __a = (a); \ - (__m256i)__builtin_ia32_psrldqi256(__a, (count)*8); }) +#define _mm256_srli_si256(a, imm) __extension__ ({ \ + (__m256i)__builtin_shufflevector( \ + (__v32qi)(__m256i)(a), \ + (__v32qi)_mm256_setzero_si256(), \ + ((char)(imm)&0xF0) ? 32 : (char)(imm) + ((char)(imm)>0xF ? 16 : 0), \ + ((char)(imm)&0xF0) ? 33 : (char)(imm) + ((char)(imm)>0xE ? 17 : 1), \ + ((char)(imm)&0xF0) ? 34 : (char)(imm) + ((char)(imm)>0xD ? 18 : 2), \ + ((char)(imm)&0xF0) ? 35 : (char)(imm) + ((char)(imm)>0xC ? 19 : 3), \ + ((char)(imm)&0xF0) ? 36 : (char)(imm) + ((char)(imm)>0xB ? 20 : 4), \ + ((char)(imm)&0xF0) ? 37 : (char)(imm) + ((char)(imm)>0xA ? 21 : 5), \ + ((char)(imm)&0xF0) ? 38 : (char)(imm) + ((char)(imm)>0x9 ? 22 : 6), \ + ((char)(imm)&0xF0) ? 39 : (char)(imm) + ((char)(imm)>0x8 ? 23 : 7), \ + ((char)(imm)&0xF0) ? 40 : (char)(imm) + ((char)(imm)>0x7 ? 24 : 8), \ + ((char)(imm)&0xF0) ? 41 : (char)(imm) + ((char)(imm)>0x6 ? 25 : 9), \ + ((char)(imm)&0xF0) ? 42 : (char)(imm) + ((char)(imm)>0x5 ? 26 : 10), \ + ((char)(imm)&0xF0) ? 43 : (char)(imm) + ((char)(imm)>0x4 ? 27 : 11), \ + ((char)(imm)&0xF0) ? 44 : (char)(imm) + ((char)(imm)>0x3 ? 28 : 12), \ + ((char)(imm)&0xF0) ? 45 : (char)(imm) + ((char)(imm)>0x2 ? 29 : 13), \ + ((char)(imm)&0xF0) ? 46 : (char)(imm) + ((char)(imm)>0x1 ? 30 : 14), \ + ((char)(imm)&0xF0) ? 47 : (char)(imm) + ((char)(imm)>0x0 ? 31 : 15), \ + ((char)(imm)&0xF0) ? 48 : (char)(imm) + ((char)(imm)>0xF ? 32 : 16), \ + ((char)(imm)&0xF0) ? 49 : (char)(imm) + ((char)(imm)>0xE ? 33 : 17), \ + ((char)(imm)&0xF0) ? 50 : (char)(imm) + ((char)(imm)>0xD ? 34 : 18), \ + ((char)(imm)&0xF0) ? 51 : (char)(imm) + ((char)(imm)>0xC ? 35 : 19), \ + ((char)(imm)&0xF0) ? 
52 : (char)(imm) + ((char)(imm)>0xB ? 36 : 20), \ + ((char)(imm)&0xF0) ? 53 : (char)(imm) + ((char)(imm)>0xA ? 37 : 21), \ + ((char)(imm)&0xF0) ? 54 : (char)(imm) + ((char)(imm)>0x9 ? 38 : 22), \ + ((char)(imm)&0xF0) ? 55 : (char)(imm) + ((char)(imm)>0x8 ? 39 : 23), \ + ((char)(imm)&0xF0) ? 56 : (char)(imm) + ((char)(imm)>0x7 ? 40 : 24), \ + ((char)(imm)&0xF0) ? 57 : (char)(imm) + ((char)(imm)>0x6 ? 41 : 25), \ + ((char)(imm)&0xF0) ? 58 : (char)(imm) + ((char)(imm)>0x5 ? 42 : 26), \ + ((char)(imm)&0xF0) ? 59 : (char)(imm) + ((char)(imm)>0x4 ? 43 : 27), \ + ((char)(imm)&0xF0) ? 60 : (char)(imm) + ((char)(imm)>0x3 ? 44 : 28), \ + ((char)(imm)&0xF0) ? 61 : (char)(imm) + ((char)(imm)>0x2 ? 45 : 29), \ + ((char)(imm)&0xF0) ? 62 : (char)(imm) + ((char)(imm)>0x1 ? 46 : 30), \ + ((char)(imm)&0xF0) ? 63 : (char)(imm) + ((char)(imm)>0x0 ? 47 : 31)); }) #define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count)) @@ -640,37 +718,37 @@ _mm256_srl_epi32(__m256i __a, __m128i __count) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_srli_epi64(__m256i __a, int __count) { - return __builtin_ia32_psrlqi256(__a, __count); + return __builtin_ia32_psrlqi256((__v4di)__a, __count); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_srl_epi64(__m256i __a, __m128i __count) { - return __builtin_ia32_psrlq256(__a, __count); + return __builtin_ia32_psrlq256((__v4di)__a, __count); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_sub_epi8(__m256i __a, __m256i __b) { - return (__m256i)((__v32qi)__a - (__v32qi)__b); + return (__m256i)((__v32qu)__a - (__v32qu)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_sub_epi16(__m256i __a, __m256i __b) { - return (__m256i)((__v16hi)__a - (__v16hi)__b); + return (__m256i)((__v16hu)__a - (__v16hu)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_sub_epi32(__m256i __a, __m256i __b) { - return (__m256i)((__v8si)__a - (__v8si)__b); + return (__m256i)((__v8su)__a - (__v8su)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_sub_epi64(__m256i __a, __m256i __b) { - return __a - __b; + return (__m256i)((__v4du)__a - (__v4du)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -718,7 +796,7 @@ _mm256_unpackhi_epi32(__m256i __a, __m256i __b) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_unpackhi_epi64(__m256i __a, __m256i __b) { - return (__m256i)__builtin_shufflevector(__a, __b, 1, 4+1, 3, 4+3); + return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -742,64 +820,62 @@ _mm256_unpacklo_epi32(__m256i __a, __m256i __b) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_unpacklo_epi64(__m256i __a, __m256i __b) { - return (__m256i)__builtin_shufflevector(__a, __b, 0, 4+0, 2, 4+2); + return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_xor_si256(__m256i __a, __m256i __b) { - return __a ^ __b; + return (__m256i)((__v4du)__a ^ (__v4du)__b); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_stream_load_si256(__m256i *__V) +_mm256_stream_load_si256(__m256i const *__V) { - return (__m256i)__builtin_ia32_movntdqa256((__v4di *)__V); + return (__m256i)__builtin_ia32_movntdqa256((const __v4di *)__V); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_broadcastss_ps(__m128 __X) { - return (__m128)__builtin_ia32_vbroadcastss_ps((__v4sf)__X); + return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0); } static __inline__ __m128d __DEFAULT_FN_ATTRS 
_mm_broadcastsd_pd(__m128d __a) { - return __builtin_shufflevector(__a, __a, 0, 0); + return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_broadcastss_ps(__m128 __X) { - return (__m256)__builtin_ia32_vbroadcastss_ps256((__v4sf)__X); + return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_broadcastsd_pd(__m128d __X) { - return (__m256d)__builtin_ia32_vbroadcastsd_pd256((__v2df)__X); + return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_broadcastsi128_si256(__m128i __X) { - return (__m256i)__builtin_shufflevector(__X, __X, 0, 1, 0, 1); + return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1); } #define _mm_blend_epi32(V1, V2, M) __extension__ ({ \ - __m128i __V1 = (V1); \ - __m128i __V2 = (V2); \ - (__m128i)__builtin_shufflevector((__v4si)__V1, (__v4si)__V2, \ + (__m128i)__builtin_shufflevector((__v4si)(__m128i)(V1), \ + (__v4si)(__m128i)(V2), \ (((M) & 0x01) ? 4 : 0), \ (((M) & 0x02) ? 5 : 1), \ (((M) & 0x04) ? 6 : 2), \ (((M) & 0x08) ? 7 : 3)); }) #define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \ - __m256i __V1 = (V1); \ - __m256i __V2 = (V2); \ - (__m256i)__builtin_shufflevector((__v8si)__V1, (__v8si)__V2, \ + (__m256i)__builtin_shufflevector((__v8si)(__m256i)(V1), \ + (__v8si)(__m256i)(V2), \ (((M) & 0x01) ? 8 : 0), \ (((M) & 0x02) ? 9 : 1), \ (((M) & 0x04) ? 10 : 2), \ @@ -812,50 +888,50 @@ _mm256_broadcastsi128_si256(__m128i __X) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_broadcastb_epi8(__m128i __X) { - return (__m256i)__builtin_ia32_pbroadcastb256((__v16qi)__X); + return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_broadcastw_epi16(__m128i __X) { - return (__m256i)__builtin_ia32_pbroadcastw256((__v8hi)__X); + return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_broadcastd_epi32(__m128i __X) { - return (__m256i)__builtin_ia32_pbroadcastd256((__v4si)__X); + return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_broadcastq_epi64(__m128i __X) { - return (__m256i)__builtin_ia32_pbroadcastq256(__X); + return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_broadcastb_epi8(__m128i __X) { - return (__m128i)__builtin_ia32_pbroadcastb128((__v16qi)__X); + return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_broadcastw_epi16(__m128i __X) { - return (__m128i)__builtin_ia32_pbroadcastw128((__v8hi)__X); + return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_broadcastd_epi32(__m128i __X) { - return (__m128i)__builtin_ia32_pbroadcastd128((__v4si)__X); + return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_broadcastq_epi64(__m128i __X) { - return (__m128i)__builtin_ia32_pbroadcastq128(__X); + return 
(__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -865,43 +941,43 @@ _mm256_permutevar8x32_epi32(__m256i __a, __m256i __b) } #define _mm256_permute4x64_pd(V, M) __extension__ ({ \ - __m256d __V = (V); \ - (__m256d)__builtin_shufflevector((__v4df)__V, (__v4df) _mm256_setzero_pd(), \ - (M) & 0x3, ((M) & 0xc) >> 2, \ - ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); }) + (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V), \ + (__v4df)_mm256_undefined_pd(), \ + ((M) >> 0) & 0x3, \ + ((M) >> 2) & 0x3, \ + ((M) >> 4) & 0x3, \ + ((M) >> 6) & 0x3); }) static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_permutevar8x32_ps(__m256 __a, __m256 __b) +_mm256_permutevar8x32_ps(__m256 __a, __m256i __b) { - return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8sf)__b); + return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b); } #define _mm256_permute4x64_epi64(V, M) __extension__ ({ \ - __m256i __V = (V); \ - (__m256i)__builtin_shufflevector((__v4di)__V, (__v4di) _mm256_setzero_si256(), \ - (M) & 0x3, ((M) & 0xc) >> 2, \ - ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); }) + (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V), \ + (__v4di)_mm256_undefined_si256(), \ + ((M) >> 0) & 0x3, \ + ((M) >> 2) & 0x3, \ + ((M) >> 4) & 0x3, \ + ((M) >> 6) & 0x3); }) #define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \ - __m256i __V1 = (V1); \ - __m256i __V2 = (V2); \ - (__m256i)__builtin_ia32_permti256(__V1, __V2, (M)); }) + (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (M)); }) #define _mm256_extracti128_si256(V, M) __extension__ ({ \ - (__m128i)__builtin_shufflevector( \ - (__v4di)(V), \ - (__v4di)(_mm256_setzero_si256()), \ - (((M) & 1) ? 2 : 0), \ - (((M) & 1) ? 3 : 1) );}) + (__m128i)__builtin_shufflevector((__v4di)(__m256i)(V), \ + (__v4di)_mm256_undefined_si256(), \ + (((M) & 1) ? 2 : 0), \ + (((M) & 1) ? 3 : 1) ); }) #define _mm256_inserti128_si256(V1, V2, M) __extension__ ({ \ - (__m256i)__builtin_shufflevector( \ - (__v4di)(V1), \ - (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \ - (((M) & 1) ? 0 : 4), \ - (((M) & 1) ? 1 : 5), \ - (((M) & 1) ? 4 : 2), \ - (((M) & 1) ? 5 : 3) );}) + (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V1), \ + (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \ + (((M) & 1) ? 0 : 4), \ + (((M) & 1) ? 1 : 5), \ + (((M) & 1) ? 4 : 2), \ + (((M) & 1) ? 
5 : 3) ); }) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_maskload_epi32(int const *__X, __m256i __M) @@ -912,7 +988,7 @@ _mm256_maskload_epi32(int const *__X, __m256i __M) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_maskload_epi64(long long const *__X, __m256i __M) { - return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, __M); + return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M); } static __inline__ __m128i __DEFAULT_FN_ATTRS @@ -936,7 +1012,7 @@ _mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y) static __inline__ void __DEFAULT_FN_ATTRS _mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y) { - __builtin_ia32_maskstoreq256((__v4di *)__X, __M, __Y); + __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y); } static __inline__ void __DEFAULT_FN_ATTRS @@ -948,7 +1024,7 @@ _mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y) static __inline__ void __DEFAULT_FN_ATTRS _mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y) { - __builtin_ia32_maskstoreq(( __v2di *)__X, __M, __Y); + __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -966,13 +1042,13 @@ _mm_sllv_epi32(__m128i __X, __m128i __Y) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_sllv_epi64(__m256i __X, __m256i __Y) { - return (__m256i)__builtin_ia32_psllv4di(__X, __Y); + return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sllv_epi64(__m128i __X, __m128i __Y) { - return (__m128i)__builtin_ia32_psllv2di(__X, __Y); + return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y); } static __inline__ __m256i __DEFAULT_FN_ATTRS @@ -1002,254 +1078,221 @@ _mm_srlv_epi32(__m128i __X, __m128i __Y) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_srlv_epi64(__m256i __X, __m256i __Y) { - return (__m256i)__builtin_ia32_psrlv4di(__X, __Y); + return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srlv_epi64(__m128i __X, __m128i __Y) { - return (__m128i)__builtin_ia32_psrlv2di(__X, __Y); + return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y); } #define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \ - __m128d __a = (a); \ - double const *__m = (m); \ - __m128i __i = (i); \ - __m128d __mask = (mask); \ - (__m128d)__builtin_ia32_gatherd_pd((__v2df)__a, (const __v2df *)__m, \ - (__v4si)__i, (__v2df)__mask, (s)); }) + (__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2df)(__m128d)(mask), (s)); }) #define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \ - __m256d __a = (a); \ - double const *__m = (m); \ - __m128i __i = (i); \ - __m256d __mask = (mask); \ - (__m256d)__builtin_ia32_gatherd_pd256((__v4df)__a, (const __v4df *)__m, \ - (__v4si)__i, (__v4df)__mask, (s)); }) + (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4df)(__m256d)(mask), (s)); }) #define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \ - __m128d __a = (a); \ - double const *__m = (m); \ - __m128i __i = (i); \ - __m128d __mask = (mask); \ - (__m128d)__builtin_ia32_gatherq_pd((__v2df)__a, (const __v2df *)__m, \ - (__v2di)__i, (__v2df)__mask, (s)); }) + (__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \ + (double const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2df)(__m128d)(mask), (s)); }) #define 
_mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \ - __m256d __a = (a); \ - double const *__m = (m); \ - __m256i __i = (i); \ - __m256d __mask = (mask); \ - (__m256d)__builtin_ia32_gatherq_pd256((__v4df)__a, (const __v4df *)__m, \ - (__v4di)__i, (__v4df)__mask, (s)); }) + (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \ + (double const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4df)(__m256d)(mask), (s)); }) #define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \ - __m128 __a = (a); \ - float const *__m = (m); \ - __m128i __i = (i); \ - __m128 __mask = (mask); \ - (__m128)__builtin_ia32_gatherd_ps((__v4sf)__a, (const __v4sf *)__m, \ - (__v4si)__i, (__v4sf)__mask, (s)); }) + (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4sf)(__m128)(mask), (s)); }) #define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \ - __m256 __a = (a); \ - float const *__m = (m); \ - __m256i __i = (i); \ - __m256 __mask = (mask); \ - (__m256)__builtin_ia32_gatherd_ps256((__v8sf)__a, (const __v8sf *)__m, \ - (__v8si)__i, (__v8sf)__mask, (s)); }) + (__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \ + (float const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8sf)(__m256)(mask), (s)); }) #define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \ - __m128 __a = (a); \ - float const *__m = (m); \ - __m128i __i = (i); \ - __m128 __mask = (mask); \ - (__m128)__builtin_ia32_gatherq_ps((__v4sf)__a, (const __v4sf *)__m, \ - (__v2di)__i, (__v4sf)__mask, (s)); }) + (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v4sf)(__m128)(mask), (s)); }) #define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \ - __m128 __a = (a); \ - float const *__m = (m); \ - __m256i __i = (i); \ - __m128 __mask = (mask); \ - (__m128)__builtin_ia32_gatherq_ps256((__v4sf)__a, (const __v4sf *)__m, \ - (__v4di)__i, (__v4sf)__mask, (s)); }) + (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4sf)(__m128)(mask), (s)); }) #define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \ - __m128i __a = (a); \ - int const *__m = (m); \ - __m128i __i = (i); \ - __m128i __mask = (mask); \ - (__m128i)__builtin_ia32_gatherd_d((__v4si)__a, (const __v4si *)__m, \ - (__v4si)__i, (__v4si)__mask, (s)); }) + (__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4si)(__m128i)(mask), (s)); }) #define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \ - __m256i __a = (a); \ - int const *__m = (m); \ - __m256i __i = (i); \ - __m256i __mask = (mask); \ - (__m256i)__builtin_ia32_gatherd_d256((__v8si)__a, (const __v8si *)__m, \ - (__v8si)__i, (__v8si)__mask, (s)); }) + (__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \ + (int const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8si)(__m256i)(mask), (s)); }) #define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \ - __m128i __a = (a); \ - int const *__m = (m); \ - __m128i __i = (i); \ - __m128i __mask = (mask); \ - (__m128i)__builtin_ia32_gatherq_d((__v4si)__a, (const __v4si *)__m, \ - (__v2di)__i, (__v4si)__mask, (s)); }) + (__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v4si)(__m128i)(mask), (s)); }) #define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \ - __m128i __a = (a); \ - int const *__m = 
(m); \ - __m256i __i = (i); \ - __m128i __mask = (mask); \ - (__m128i)__builtin_ia32_gatherq_d256((__v4si)__a, (const __v4si *)__m, \ - (__v4di)__i, (__v4si)__mask, (s)); }) + (__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4si)(__m128i)(mask), (s)); }) #define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \ - __m128i __a = (a); \ - long long const *__m = (m); \ - __m128i __i = (i); \ - __m128i __mask = (mask); \ - (__m128i)__builtin_ia32_gatherd_q((__v2di)__a, (const __v2di *)__m, \ - (__v4si)__i, (__v2di)__mask, (s)); }) + (__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2di)(__m128i)(mask), (s)); }) #define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \ - __m256i __a = (a); \ - long long const *__m = (m); \ - __m128i __i = (i); \ - __m256i __mask = (mask); \ - (__m256i)__builtin_ia32_gatherd_q256((__v4di)__a, (const __v4di *)__m, \ - (__v4si)__i, (__v4di)__mask, (s)); }) + (__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4di)(__m256i)(mask), (s)); }) #define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \ - __m128i __a = (a); \ - long long const *__m = (m); \ - __m128i __i = (i); \ - __m128i __mask = (mask); \ - (__m128i)__builtin_ia32_gatherq_q((__v2di)__a, (const __v2di *)__m, \ - (__v2di)__i, (__v2di)__mask, (s)); }) + (__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \ + (long long const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2di)(__m128i)(mask), (s)); }) #define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \ - __m256i __a = (a); \ - long long const *__m = (m); \ - __m256i __i = (i); \ - __m256i __mask = (mask); \ - (__m256i)__builtin_ia32_gatherq_q256((__v4di)__a, (const __v4di *)__m, \ - (__v4di)__i, (__v4di)__mask, (s)); }) + (__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \ + (long long const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4di)(__m256i)(mask), (s)); }) #define _mm_i32gather_pd(m, i, s) __extension__ ({ \ - double const *__m = (m); \ - __m128i __i = (i); \ - (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_setzero_pd(), \ - (const __v2df *)__m, (__v4si)__i, \ - (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); }) + (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \ + _mm_setzero_pd()), \ + (s)); }) #define _mm256_i32gather_pd(m, i, s) __extension__ ({ \ - double const *__m = (m); \ - __m128i __i = (i); \ - (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_setzero_pd(), \ - (const __v4df *)__m, (__v4si)__i, \ - (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); }) + (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \ + _mm256_setzero_pd(), \ + _CMP_EQ_OQ), \ + (s)); }) #define _mm_i64gather_pd(m, i, s) __extension__ ({ \ - double const *__m = (m); \ - __m128i __i = (i); \ - (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_setzero_pd(), \ - (const __v2df *)__m, (__v2di)__i, \ - (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); }) + (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \ + (double const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \ + _mm_setzero_pd()), \ + (s)); }) #define _mm256_i64gather_pd(m, 
i, s) __extension__ ({ \ - double const *__m = (m); \ - __m256i __i = (i); \ - (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_setzero_pd(), \ - (const __v4df *)__m, (__v4di)__i, \ - (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); }) + (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \ + (double const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \ + _mm256_setzero_pd(), \ + _CMP_EQ_OQ), \ + (s)); }) #define _mm_i32gather_ps(m, i, s) __extension__ ({ \ - float const *__m = (m); \ - __m128i __i = (i); \ - (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_setzero_ps(), \ - (const __v4sf *)__m, (__v4si)__i, \ - (__v4sf)_mm_set1_ps((float)(int)-1), (s)); }) + (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \ + (float const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ + _mm_setzero_ps()), \ + (s)); }) #define _mm256_i32gather_ps(m, i, s) __extension__ ({ \ - float const *__m = (m); \ - __m256i __i = (i); \ - (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_setzero_ps(), \ - (const __v8sf *)__m, (__v8si)__i, \ - (__v8sf)_mm256_set1_ps((float)(int)-1), (s)); }) + (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \ + (float const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \ + _mm256_setzero_ps(), \ + _CMP_EQ_OQ), \ + (s)); }) #define _mm_i64gather_ps(m, i, s) __extension__ ({ \ - float const *__m = (m); \ - __m128i __i = (i); \ - (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_setzero_ps(), \ - (const __v4sf *)__m, (__v2di)__i, \ - (__v4sf)_mm_set1_ps((float)(int)-1), (s)); }) + (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \ + (float const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ + _mm_setzero_ps()), \ + (s)); }) #define _mm256_i64gather_ps(m, i, s) __extension__ ({ \ - float const *__m = (m); \ - __m256i __i = (i); \ - (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_setzero_ps(), \ - (const __v4sf *)__m, (__v4di)__i, \ - (__v4sf)_mm_set1_ps((float)(int)-1), (s)); }) + (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \ + (float const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ + _mm_setzero_ps()), \ + (s)); }) #define _mm_i32gather_epi32(m, i, s) __extension__ ({ \ - int const *__m = (m); \ - __m128i __i = (i); \ - (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_setzero_si128(), \ - (const __v4si *)__m, (__v4si)__i, \ - (__v4si)_mm_set1_epi32(-1), (s)); }) + (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v4si)(__m128i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s)); }) #define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \ - int const *__m = (m); \ - __m256i __i = (i); \ - (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_setzero_si256(), \ - (const __v8si *)__m, (__v8si)__i, \ - (__v8si)_mm256_set1_epi32(-1), (s)); }) + (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \ + (int const *)(m), (__v8si)(__m256i)(i), \ + (__v8si)_mm256_set1_epi32(-1), (s)); }) #define _mm_i64gather_epi32(m, i, s) __extension__ ({ \ - int const *__m = (m); \ - __m128i __i = (i); \ - (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_setzero_si128(), \ - (const __v4si *)__m, (__v2di)__i, \ - (__v4si)_mm_set1_epi32(-1), (s)); }) + (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v2di)(__m128i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s)); }) #define 
_mm256_i64gather_epi32(m, i, s) __extension__ ({ \ - int const *__m = (m); \ - __m256i __i = (i); \ - (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_setzero_si128(), \ - (const __v4si *)__m, (__v4di)__i, \ - (__v4si)_mm_set1_epi32(-1), (s)); }) + (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v4di)(__m256i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s)); }) #define _mm_i32gather_epi64(m, i, s) __extension__ ({ \ - long long const *__m = (m); \ - __m128i __i = (i); \ - (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_setzero_si128(), \ - (const __v2di *)__m, (__v4si)__i, \ - (__v2di)_mm_set1_epi64x(-1), (s)); }) + (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2di)_mm_set1_epi64x(-1), (s)); }) #define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \ - long long const *__m = (m); \ - __m128i __i = (i); \ - (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_setzero_si256(), \ - (const __v4di *)__m, (__v4si)__i, \ - (__v4di)_mm256_set1_epi64x(-1), (s)); }) + (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4di)_mm256_set1_epi64x(-1), (s)); }) #define _mm_i64gather_epi64(m, i, s) __extension__ ({ \ - long long const *__m = (m); \ - __m128i __i = (i); \ - (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_setzero_si128(), \ - (const __v2di *)__m, (__v2di)__i, \ - (__v2di)_mm_set1_epi64x(-1), (s)); }) + (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \ + (long long const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2di)_mm_set1_epi64x(-1), (s)); }) #define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \ - long long const *__m = (m); \ - __m256i __i = (i); \ - (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_setzero_si256(), \ - (const __v4di *)__m, (__v4di)__i, \ - (__v4di)_mm256_set1_epi64x(-1), (s)); }) + (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \ + (long long const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4di)_mm256_set1_epi64x(-1), (s)); }) #undef __DEFAULT_FN_ATTRS diff --git a/c_headers/avx512bwintrin.h b/c_headers/avx512bwintrin.h index 9e8297a9c..629dc8611 100644 --- a/c_headers/avx512bwintrin.h +++ b/c_headers/avx512bwintrin.h @@ -30,30 +30,28 @@ typedef unsigned int __mmask32; typedef unsigned long long __mmask64; -typedef char __v64qi __attribute__ ((__vector_size__ (64))); -typedef short __v32hi __attribute__ ((__vector_size__ (64))); /* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"))) -static __inline __v64qi __DEFAULT_FN_ATTRS -_mm512_setzero_qi (void) { - return (__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 }; +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_setzero_qi(void) { + return (__m512i)(__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; } -static __inline __v32hi __DEFAULT_FN_ATTRS -_mm512_setzero_hi (void) { - return (__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 }; +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_setzero_hi(void) { + return (__m512i)(__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; } /* Integer compare */ @@ -348,135 +346,120 @@ _mm512_mask_cmpneq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) { static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_add_epi8 (__m512i __A, __m512i __B) { - return (__m512i) ((__v64qi) __A + (__v64qi) __B); + return (__m512i) ((__v64qu) __A + (__v64qu) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A, - (__v64qi) __B, - (__v64qi) __W, - (__mmask64) __U); +_mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_add_epi8(__A, __B), + (__v64qi)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A, - (__v64qi) __B, - (__v64qi) - _mm512_setzero_qi (), - (__mmask64) __U); +_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_add_epi8(__A, __B), + (__v64qi)_mm512_setzero_qi()); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sub_epi8 (__m512i __A, __m512i __B) { - return (__m512i) ((__v64qi) __A - (__v64qi) __B); + return (__m512i) ((__v64qu) __A - (__v64qu) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A, - (__v64qi) __B, - (__v64qi) __W, - (__mmask64) __U); +_mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_sub_epi8(__A, __B), + (__v64qi)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A, - (__v64qi) __B, - (__v64qi) - _mm512_setzero_qi (), - (__mmask64) __U); +_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_sub_epi8(__A, __B), + (__v64qi)_mm512_setzero_qi()); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_add_epi16 (__m512i __A, 
__m512i __B) { - return (__m512i) ((__v32hi) __A + (__v32hi) __B); + return (__m512i) ((__v32hu) __A + (__v32hu) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A, - (__v32hi) __B, - (__v32hi) __W, - (__mmask32) __U); +_mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_add_epi16(__A, __B), + (__v32hi)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A, - (__v32hi) __B, - (__v32hi) - _mm512_setzero_hi (), - (__mmask32) __U); +_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_add_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sub_epi16 (__m512i __A, __m512i __B) { - return (__m512i) ((__v32hi) __A - (__v32hi) __B); + return (__m512i) ((__v32hu) __A - (__v32hu) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A, - (__v32hi) __B, - (__v32hi) __W, - (__mmask32) __U); +_mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sub_epi16(__A, __B), + (__v32hi)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A, - (__v32hi) __B, - (__v32hi) - _mm512_setzero_hi (), - (__mmask32) __U); +_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sub_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mullo_epi16 (__m512i __A, __m512i __B) { - return (__m512i) ((__v32hi) __A * (__v32hi) __B); + return (__m512i) ((__v32hu) __A * (__v32hu) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A, - (__v32hi) __B, - (__v32hi) __W, - (__mmask32) __U); +_mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mullo_epi16(__A, __B), + (__v32hi)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A, - (__v32hi) __B, - (__v32hi) - _mm512_setzero_hi (), - (__mmask32) __U); +_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mullo_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W) { - return (__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) __A, + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, (__v64qi) __W, - (__mmask64) __U); + (__v64qi) __A); } static __inline__ __m512i __DEFAULT_FN_ATTRS 
_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W) { - return (__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) __A, + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, (__v32hi) __W, - (__mmask32) __U); + (__v32hi) __A); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_abs_epi8 (__m512i __A) { return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -492,7 +475,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A) { return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) __U); } @@ -500,7 +483,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_abs_epi16 (__m512i __A) { return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -516,7 +499,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A) { return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) __U); } @@ -525,7 +508,7 @@ _mm512_packs_epi32 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, (__v16si) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -553,7 +536,7 @@ _mm512_packs_epi16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A, (__v32hi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -581,7 +564,7 @@ _mm512_packus_epi32 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, (__v16si) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -609,7 +592,7 @@ _mm512_packus_epi16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A, (__v32hi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -637,7 +620,7 @@ _mm512_adds_epi8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -656,7 +639,7 @@ _mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) __U); } @@ -665,7 +648,7 @@ _mm512_adds_epi16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -684,7 +667,7 @@ _mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) __U); } @@ -693,7 +676,7 @@ _mm512_adds_epu8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -712,7 +695,7 @@ _mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A, 
(__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) __U); } @@ -721,7 +704,7 @@ _mm512_adds_epu16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -740,7 +723,7 @@ _mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) __U); } @@ -749,7 +732,7 @@ _mm512_avg_epu8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -777,7 +760,7 @@ _mm512_avg_epu16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -805,7 +788,7 @@ _mm512_max_epi8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -833,7 +816,7 @@ _mm512_max_epi16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -861,7 +844,7 @@ _mm512_max_epu8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -889,7 +872,7 @@ _mm512_max_epu16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -917,7 +900,7 @@ _mm512_min_epi8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -945,7 +928,7 @@ _mm512_min_epi16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -973,7 +956,7 @@ _mm512_min_epu8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -1001,7 +984,7 @@ _mm512_min_epu16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -1025,31 +1008,25 @@ _mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_shuffle_epi8 (__m512i __A, __m512i __B) +_mm512_shuffle_epi8(__m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A, - (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), - (__mmask64) -1); + return (__m512i)__builtin_ia32_pshufb512((__v64qi)__A,(__v64qi)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_shuffle_epi8 (__m512i __W, __mmask64 __U, __m512i __A, - __m512i __B) +_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { - return (__m512i) 
__builtin_ia32_pshufb512_mask ((__v64qi) __A, - (__v64qi) __B, - (__v64qi) __W, - (__mmask64) __U); + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_shuffle_epi8(__A, __B), + (__v64qi)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_shuffle_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A, - (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), - (__mmask64) __U); + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_shuffle_epi8(__A, __B), + (__v64qi)_mm512_setzero_qi()); } static __inline__ __m512i __DEFAULT_FN_ATTRS @@ -1057,7 +1034,7 @@ _mm512_subs_epi8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -1076,7 +1053,7 @@ _mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) __U); } @@ -1085,7 +1062,7 @@ _mm512_subs_epi16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -1104,7 +1081,7 @@ _mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) __U); } @@ -1113,7 +1090,7 @@ _mm512_subs_epu8 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) -1); } @@ -1132,7 +1109,7 @@ _mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A, (__v64qi) __B, - (__v64qi) _mm512_setzero_qi (), + (__v64qi) _mm512_setzero_qi(), (__mmask64) __U); } @@ -1141,7 +1118,7 @@ _mm512_subs_epu16 (__m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) -1); } @@ -1160,7 +1137,7 @@ _mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B) { return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A, (__v32hi) __B, - (__v32hi) _mm512_setzero_hi (), + (__v32hi) _mm512_setzero_hi(), (__mmask32) __U); } @@ -1204,45 +1181,1178 @@ _mm512_maskz_permutex2var_epi16 (__mmask32 __U, __m512i __A, (__mmask32) __U); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mulhrs_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_mulhrs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_mulhrs_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) __U); +} + +static 
__inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mulhi_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_mulhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_mulhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mulhi_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_mulhi_epu16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maddubs_epi16 (__m512i __X, __m512i __Y) { + return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_maddubs_epi16 (__m512i __W, __mmask32 __U, __m512i __X, + __m512i __Y) { + return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_maddubs_epi16 (__mmask32 __U, __m512i __X, __m512i __Y) { + return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_madd_epi16 (__m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v16si) _mm512_setzero_si512(), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A, + __m512i __B) { + return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_madd_epi16 (__mmask16 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v16si) _mm512_setzero_si512(), + (__mmask16) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtsepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi)_mm256_setzero_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi)__O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) 
__builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtusepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_unpackhi_epi8(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B, + 8, 64+8, 9, 64+9, + 10, 64+10, 11, 64+11, + 12, 64+12, 13, 64+13, + 14, 64+14, 15, 64+15, + 24, 64+24, 25, 64+25, + 26, 64+26, 27, 64+27, + 28, 64+28, 29, 64+29, + 30, 64+30, 31, 64+31, + 40, 64+40, 41, 64+41, + 42, 64+42, 43, 64+43, + 44, 64+44, 45, 64+45, + 46, 64+46, 47, 64+47, + 56, 64+56, 57, 64+57, + 58, 64+58, 59, 64+59, + 60, 64+60, 61, 64+61, + 62, 64+62, 63, 64+63); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpackhi_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpackhi_epi8(__A, __B), + (__v64qi)_mm512_setzero_qi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_unpackhi_epi16(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B, + 4, 32+4, 5, 32+5, + 6, 32+6, 7, 32+7, + 12, 32+12, 13, 32+13, + 14, 32+14, 15, 32+15, + 20, 32+20, 21, 32+21, + 22, 32+22, 23, 32+23, + 28, 32+28, 29, 32+29, + 30, 32+30, 31, 32+31); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + 
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpackhi_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpackhi_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_unpacklo_epi8(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B, + 0, 64+0, 1, 64+1, + 2, 64+2, 3, 64+3, + 4, 64+4, 5, 64+5, + 6, 64+6, 7, 64+7, + 16, 64+16, 17, 64+17, + 18, 64+18, 19, 64+19, + 20, 64+20, 21, 64+21, + 22, 64+22, 23, 64+23, + 32, 64+32, 33, 64+33, + 34, 64+34, 35, 64+35, + 36, 64+36, 37, 64+37, + 38, 64+38, 39, 64+39, + 48, 64+48, 49, 64+49, + 50, 64+50, 51, 64+51, + 52, 64+52, 53, 64+53, + 54, 64+54, 55, 64+55); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpacklo_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpacklo_epi8(__A, __B), + (__v64qi)_mm512_setzero_qi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_unpacklo_epi16(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B, + 0, 32+0, 1, 32+1, + 2, 32+2, 3, 32+3, + 8, 32+8, 9, 32+9, + 10, 32+10, 11, 32+11, + 16, 32+16, 17, 32+17, + 18, 32+18, 19, 32+19, + 24, 32+24, 25, 32+25, + 26, 32+26, 27, 32+27); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpacklo_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpacklo_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepi8_epi16(__m256i __A) +{ + /* This function always performs a signed extension, but __v32qi is a char + which may be signed or unsigned, so use __v32qs. 
*/ + return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepi8_epi16(__A), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepi8_epi16(__A), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepu8_epi16(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepu8_epi16(__A), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepu8_epi16(__A), + (__v32hi)_mm512_setzero_hi()); +} + + #define _mm512_cmp_epi8_mask(a, b, p) __extension__ ({ \ - (__mmask16)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ - (__v64qi)(__m512i)(b), \ - (p), (__mmask64)-1); }) + (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)-1); }) #define _mm512_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \ - (__mmask16)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ - (__v64qi)(__m512i)(b), \ - (p), (__mmask64)(m)); }) + (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)(m)); }) #define _mm512_cmp_epu8_mask(a, b, p) __extension__ ({ \ - (__mmask16)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ - (__v64qi)(__m512i)(b), \ - (p), (__mmask64)-1); }) + (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)-1); }) #define _mm512_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \ - (__mmask16)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ - (__v64qi)(__m512i)(b), \ - (p), (__mmask64)(m)); }) + (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)(m)); }) #define _mm512_cmp_epi16_mask(a, b, p) __extension__ ({ \ - (__mmask16)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ - (__v32hi)(__m512i)(b), \ - (p), (__mmask32)-1); }) + (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)-1); }) #define _mm512_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \ - (__mmask16)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ - (__v32hi)(__m512i)(b), \ - (p), (__mmask32)(m)); }) + (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)(m)); }) #define _mm512_cmp_epu16_mask(a, b, p) __extension__ ({ \ - (__mmask16)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ - (__v32hi)(__m512i)(b), \ - (p), (__mmask32)-1); }) + (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)-1); }) #define _mm512_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \ - (__mmask16)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ - (__v32hi)(__m512i)(b), \ - (p), (__mmask32)(m)); }) + 
(__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)(m)); }) + +#define _mm512_shufflehi_epi16(A, imm) __extension__ ({ \ + (__m512i)__builtin_shufflevector((__v32hi)(__m512i)(A), \ + (__v32hi)_mm512_undefined_epi32(), \ + 0, 1, 2, 3, \ + 4 + (((imm) >> 0) & 0x3), \ + 4 + (((imm) >> 2) & 0x3), \ + 4 + (((imm) >> 4) & 0x3), \ + 4 + (((imm) >> 6) & 0x3), \ + 8, 9, 10, 11, \ + 12 + (((imm) >> 0) & 0x3), \ + 12 + (((imm) >> 2) & 0x3), \ + 12 + (((imm) >> 4) & 0x3), \ + 12 + (((imm) >> 6) & 0x3), \ + 16, 17, 18, 19, \ + 20 + (((imm) >> 0) & 0x3), \ + 20 + (((imm) >> 2) & 0x3), \ + 20 + (((imm) >> 4) & 0x3), \ + 20 + (((imm) >> 6) & 0x3), \ + 24, 25, 26, 27, \ + 28 + (((imm) >> 0) & 0x3), \ + 28 + (((imm) >> 2) & 0x3), \ + 28 + (((imm) >> 4) & 0x3), \ + 28 + (((imm) >> 6) & 0x3)); }) + +#define _mm512_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflehi_epi16((A), \ + (imm)), \ + (__v32hi)(__m512i)(W)); }) + +#define _mm512_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflehi_epi16((A), \ + (imm)), \ + (__v32hi)_mm512_setzero_hi()); }) + +#define _mm512_shufflelo_epi16(A, imm) __extension__ ({ \ + (__m512i)__builtin_shufflevector((__v32hi)(__m512i)(A), \ + (__v32hi)_mm512_undefined_epi32(), \ + 0 + (((imm) >> 0) & 0x3), \ + 0 + (((imm) >> 2) & 0x3), \ + 0 + (((imm) >> 4) & 0x3), \ + 0 + (((imm) >> 6) & 0x3), \ + 4, 5, 6, 7, \ + 8 + (((imm) >> 0) & 0x3), \ + 8 + (((imm) >> 2) & 0x3), \ + 8 + (((imm) >> 4) & 0x3), \ + 8 + (((imm) >> 6) & 0x3), \ + 12, 13, 14, 15, \ + 16 + (((imm) >> 0) & 0x3), \ + 16 + (((imm) >> 2) & 0x3), \ + 16 + (((imm) >> 4) & 0x3), \ + 16 + (((imm) >> 6) & 0x3), \ + 20, 21, 22, 23, \ + 24 + (((imm) >> 0) & 0x3), \ + 24 + (((imm) >> 2) & 0x3), \ + 24 + (((imm) >> 4) & 0x3), \ + 24 + (((imm) >> 6) & 0x3), \ + 28, 29, 30, 31); }) + + +#define _mm512_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflelo_epi16((A), \ + (imm)), \ + (__v32hi)(__m512i)(W)); }) + + +#define _mm512_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflelo_epi16((A), \ + (imm)), \ + (__v32hi)_mm512_setzero_hi()); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sllv_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psllv32hi((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sllv_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sllv_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sll_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psllw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sll_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ 
__m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sll_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_slli_epi16(__m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_slli_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_slli_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +#define _mm512_bslli_epi128(a, imm) __extension__ ({ \ + (__m512i)__builtin_shufflevector( \ + (__v64qi)_mm512_setzero_si512(), \ + (__v64qi)(__m512i)(a), \ + ((char)(imm)&0xF0) ? 0 : ((char)(imm)>0x0 ? 16 : 64) - (char)(imm), \ + ((char)(imm)&0xF0) ? 1 : ((char)(imm)>0x1 ? 17 : 65) - (char)(imm), \ + ((char)(imm)&0xF0) ? 2 : ((char)(imm)>0x2 ? 18 : 66) - (char)(imm), \ + ((char)(imm)&0xF0) ? 3 : ((char)(imm)>0x3 ? 19 : 67) - (char)(imm), \ + ((char)(imm)&0xF0) ? 4 : ((char)(imm)>0x4 ? 20 : 68) - (char)(imm), \ + ((char)(imm)&0xF0) ? 5 : ((char)(imm)>0x5 ? 21 : 69) - (char)(imm), \ + ((char)(imm)&0xF0) ? 6 : ((char)(imm)>0x6 ? 22 : 70) - (char)(imm), \ + ((char)(imm)&0xF0) ? 7 : ((char)(imm)>0x7 ? 23 : 71) - (char)(imm), \ + ((char)(imm)&0xF0) ? 8 : ((char)(imm)>0x8 ? 24 : 72) - (char)(imm), \ + ((char)(imm)&0xF0) ? 9 : ((char)(imm)>0x9 ? 25 : 73) - (char)(imm), \ + ((char)(imm)&0xF0) ? 10 : ((char)(imm)>0xA ? 26 : 74) - (char)(imm), \ + ((char)(imm)&0xF0) ? 11 : ((char)(imm)>0xB ? 27 : 75) - (char)(imm), \ + ((char)(imm)&0xF0) ? 12 : ((char)(imm)>0xC ? 28 : 76) - (char)(imm), \ + ((char)(imm)&0xF0) ? 13 : ((char)(imm)>0xD ? 29 : 77) - (char)(imm), \ + ((char)(imm)&0xF0) ? 14 : ((char)(imm)>0xE ? 30 : 78) - (char)(imm), \ + ((char)(imm)&0xF0) ? 15 : ((char)(imm)>0xF ? 31 : 79) - (char)(imm), \ + ((char)(imm)&0xF0) ? 16 : ((char)(imm)>0x0 ? 32 : 80) - (char)(imm), \ + ((char)(imm)&0xF0) ? 17 : ((char)(imm)>0x1 ? 33 : 81) - (char)(imm), \ + ((char)(imm)&0xF0) ? 18 : ((char)(imm)>0x2 ? 34 : 82) - (char)(imm), \ + ((char)(imm)&0xF0) ? 19 : ((char)(imm)>0x3 ? 35 : 83) - (char)(imm), \ + ((char)(imm)&0xF0) ? 20 : ((char)(imm)>0x4 ? 36 : 84) - (char)(imm), \ + ((char)(imm)&0xF0) ? 21 : ((char)(imm)>0x5 ? 37 : 85) - (char)(imm), \ + ((char)(imm)&0xF0) ? 22 : ((char)(imm)>0x6 ? 38 : 86) - (char)(imm), \ + ((char)(imm)&0xF0) ? 23 : ((char)(imm)>0x7 ? 39 : 87) - (char)(imm), \ + ((char)(imm)&0xF0) ? 24 : ((char)(imm)>0x8 ? 40 : 88) - (char)(imm), \ + ((char)(imm)&0xF0) ? 25 : ((char)(imm)>0x9 ? 41 : 89) - (char)(imm), \ + ((char)(imm)&0xF0) ? 26 : ((char)(imm)>0xA ? 42 : 90) - (char)(imm), \ + ((char)(imm)&0xF0) ? 27 : ((char)(imm)>0xB ? 43 : 91) - (char)(imm), \ + ((char)(imm)&0xF0) ? 28 : ((char)(imm)>0xC ? 44 : 92) - (char)(imm), \ + ((char)(imm)&0xF0) ? 29 : ((char)(imm)>0xD ? 45 : 93) - (char)(imm), \ + ((char)(imm)&0xF0) ? 30 : ((char)(imm)>0xE ? 46 : 94) - (char)(imm), \ + ((char)(imm)&0xF0) ? 31 : ((char)(imm)>0xF ? 47 : 95) - (char)(imm), \ + ((char)(imm)&0xF0) ? 32 : ((char)(imm)>0x0 ? 48 : 96) - (char)(imm), \ + ((char)(imm)&0xF0) ? 33 : ((char)(imm)>0x1 ? 49 : 97) - (char)(imm), \ + ((char)(imm)&0xF0) ? 
34 : ((char)(imm)>0x2 ? 50 : 98) - (char)(imm), \ + ((char)(imm)&0xF0) ? 35 : ((char)(imm)>0x3 ? 51 : 99) - (char)(imm), \ + ((char)(imm)&0xF0) ? 36 : ((char)(imm)>0x4 ? 52 : 100) - (char)(imm), \ + ((char)(imm)&0xF0) ? 37 : ((char)(imm)>0x5 ? 53 : 101) - (char)(imm), \ + ((char)(imm)&0xF0) ? 38 : ((char)(imm)>0x6 ? 54 : 102) - (char)(imm), \ + ((char)(imm)&0xF0) ? 39 : ((char)(imm)>0x7 ? 55 : 103) - (char)(imm), \ + ((char)(imm)&0xF0) ? 40 : ((char)(imm)>0x8 ? 56 : 104) - (char)(imm), \ + ((char)(imm)&0xF0) ? 41 : ((char)(imm)>0x9 ? 57 : 105) - (char)(imm), \ + ((char)(imm)&0xF0) ? 42 : ((char)(imm)>0xA ? 58 : 106) - (char)(imm), \ + ((char)(imm)&0xF0) ? 43 : ((char)(imm)>0xB ? 59 : 107) - (char)(imm), \ + ((char)(imm)&0xF0) ? 44 : ((char)(imm)>0xC ? 60 : 108) - (char)(imm), \ + ((char)(imm)&0xF0) ? 45 : ((char)(imm)>0xD ? 61 : 109) - (char)(imm), \ + ((char)(imm)&0xF0) ? 46 : ((char)(imm)>0xE ? 62 : 110) - (char)(imm), \ + ((char)(imm)&0xF0) ? 47 : ((char)(imm)>0xF ? 63 : 111) - (char)(imm), \ + ((char)(imm)&0xF0) ? 48 : ((char)(imm)>0x0 ? 64 : 112) - (char)(imm), \ + ((char)(imm)&0xF0) ? 49 : ((char)(imm)>0x1 ? 65 : 113) - (char)(imm), \ + ((char)(imm)&0xF0) ? 50 : ((char)(imm)>0x2 ? 66 : 114) - (char)(imm), \ + ((char)(imm)&0xF0) ? 51 : ((char)(imm)>0x3 ? 67 : 115) - (char)(imm), \ + ((char)(imm)&0xF0) ? 52 : ((char)(imm)>0x4 ? 68 : 116) - (char)(imm), \ + ((char)(imm)&0xF0) ? 53 : ((char)(imm)>0x5 ? 69 : 117) - (char)(imm), \ + ((char)(imm)&0xF0) ? 54 : ((char)(imm)>0x6 ? 70 : 118) - (char)(imm), \ + ((char)(imm)&0xF0) ? 55 : ((char)(imm)>0x7 ? 71 : 119) - (char)(imm), \ + ((char)(imm)&0xF0) ? 56 : ((char)(imm)>0x8 ? 72 : 120) - (char)(imm), \ + ((char)(imm)&0xF0) ? 57 : ((char)(imm)>0x9 ? 73 : 121) - (char)(imm), \ + ((char)(imm)&0xF0) ? 58 : ((char)(imm)>0xA ? 74 : 122) - (char)(imm), \ + ((char)(imm)&0xF0) ? 59 : ((char)(imm)>0xB ? 75 : 123) - (char)(imm), \ + ((char)(imm)&0xF0) ? 60 : ((char)(imm)>0xC ? 76 : 124) - (char)(imm), \ + ((char)(imm)&0xF0) ? 61 : ((char)(imm)>0xD ? 77 : 125) - (char)(imm), \ + ((char)(imm)&0xF0) ? 62 : ((char)(imm)>0xE ? 78 : 126) - (char)(imm), \ + ((char)(imm)&0xF0) ? 63 : ((char)(imm)>0xF ? 
79 : 127) - (char)(imm)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srlv_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psrlv32hi((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srlv_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srlv_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srav_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psrav32hi((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srav_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srav_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sra_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psraw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sra_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sra_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srai_epi16(__m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srai_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srai_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srl_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrlw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srl_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srl_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srli_epi16(__m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B); +} + +static __inline__ 
__m512i __DEFAULT_FN_ATTRS +_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srli_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srli_epi16(__A, __B), + (__v32hi)_mm512_setzero_hi()); +} + +#define _mm512_bsrli_epi128(a, imm) __extension__ ({ \ + (__m512i)__builtin_shufflevector( \ + (__v64qi)(__m512i)(a), \ + (__v64qi)_mm512_setzero_si512(), \ + ((char)(imm)&0xF0) ? 64 : (char)(imm) + ((char)(imm)>0xF ? 48 : 0), \ + ((char)(imm)&0xF0) ? 65 : (char)(imm) + ((char)(imm)>0xE ? 49 : 1), \ + ((char)(imm)&0xF0) ? 66 : (char)(imm) + ((char)(imm)>0xD ? 50 : 2), \ + ((char)(imm)&0xF0) ? 67 : (char)(imm) + ((char)(imm)>0xC ? 51 : 3), \ + ((char)(imm)&0xF0) ? 68 : (char)(imm) + ((char)(imm)>0xB ? 52 : 4), \ + ((char)(imm)&0xF0) ? 69 : (char)(imm) + ((char)(imm)>0xA ? 53 : 5), \ + ((char)(imm)&0xF0) ? 70 : (char)(imm) + ((char)(imm)>0x9 ? 54 : 6), \ + ((char)(imm)&0xF0) ? 71 : (char)(imm) + ((char)(imm)>0x8 ? 55 : 7), \ + ((char)(imm)&0xF0) ? 72 : (char)(imm) + ((char)(imm)>0x7 ? 56 : 8), \ + ((char)(imm)&0xF0) ? 73 : (char)(imm) + ((char)(imm)>0x6 ? 57 : 9), \ + ((char)(imm)&0xF0) ? 74 : (char)(imm) + ((char)(imm)>0x5 ? 58 : 10), \ + ((char)(imm)&0xF0) ? 75 : (char)(imm) + ((char)(imm)>0x4 ? 59 : 11), \ + ((char)(imm)&0xF0) ? 76 : (char)(imm) + ((char)(imm)>0x3 ? 60 : 12), \ + ((char)(imm)&0xF0) ? 77 : (char)(imm) + ((char)(imm)>0x2 ? 61 : 13), \ + ((char)(imm)&0xF0) ? 78 : (char)(imm) + ((char)(imm)>0x1 ? 62 : 14), \ + ((char)(imm)&0xF0) ? 79 : (char)(imm) + ((char)(imm)>0x0 ? 63 : 15), \ + ((char)(imm)&0xF0) ? 80 : (char)(imm) + ((char)(imm)>0xF ? 64 : 16), \ + ((char)(imm)&0xF0) ? 81 : (char)(imm) + ((char)(imm)>0xE ? 65 : 17), \ + ((char)(imm)&0xF0) ? 82 : (char)(imm) + ((char)(imm)>0xD ? 66 : 18), \ + ((char)(imm)&0xF0) ? 83 : (char)(imm) + ((char)(imm)>0xC ? 67 : 19), \ + ((char)(imm)&0xF0) ? 84 : (char)(imm) + ((char)(imm)>0xB ? 68 : 20), \ + ((char)(imm)&0xF0) ? 85 : (char)(imm) + ((char)(imm)>0xA ? 69 : 21), \ + ((char)(imm)&0xF0) ? 86 : (char)(imm) + ((char)(imm)>0x9 ? 70 : 22), \ + ((char)(imm)&0xF0) ? 87 : (char)(imm) + ((char)(imm)>0x8 ? 71 : 23), \ + ((char)(imm)&0xF0) ? 88 : (char)(imm) + ((char)(imm)>0x7 ? 72 : 24), \ + ((char)(imm)&0xF0) ? 89 : (char)(imm) + ((char)(imm)>0x6 ? 73 : 25), \ + ((char)(imm)&0xF0) ? 90 : (char)(imm) + ((char)(imm)>0x5 ? 74 : 26), \ + ((char)(imm)&0xF0) ? 91 : (char)(imm) + ((char)(imm)>0x4 ? 75 : 27), \ + ((char)(imm)&0xF0) ? 92 : (char)(imm) + ((char)(imm)>0x3 ? 76 : 28), \ + ((char)(imm)&0xF0) ? 93 : (char)(imm) + ((char)(imm)>0x2 ? 77 : 29), \ + ((char)(imm)&0xF0) ? 94 : (char)(imm) + ((char)(imm)>0x1 ? 78 : 30), \ + ((char)(imm)&0xF0) ? 95 : (char)(imm) + ((char)(imm)>0x0 ? 79 : 31), \ + ((char)(imm)&0xF0) ? 96 : (char)(imm) + ((char)(imm)>0xF ? 80 : 32), \ + ((char)(imm)&0xF0) ? 97 : (char)(imm) + ((char)(imm)>0xE ? 81 : 33), \ + ((char)(imm)&0xF0) ? 98 : (char)(imm) + ((char)(imm)>0xD ? 82 : 34), \ + ((char)(imm)&0xF0) ? 99 : (char)(imm) + ((char)(imm)>0xC ? 83 : 35), \ + ((char)(imm)&0xF0) ? 100 : (char)(imm) + ((char)(imm)>0xB ? 84 : 36), \ + ((char)(imm)&0xF0) ? 101 : (char)(imm) + ((char)(imm)>0xA ? 85 : 37), \ + ((char)(imm)&0xF0) ? 102 : (char)(imm) + ((char)(imm)>0x9 ? 86 : 38), \ + ((char)(imm)&0xF0) ? 103 : (char)(imm) + ((char)(imm)>0x8 ? 
87 : 39), \ + ((char)(imm)&0xF0) ? 104 : (char)(imm) + ((char)(imm)>0x7 ? 88 : 40), \ + ((char)(imm)&0xF0) ? 105 : (char)(imm) + ((char)(imm)>0x6 ? 89 : 41), \ + ((char)(imm)&0xF0) ? 106 : (char)(imm) + ((char)(imm)>0x5 ? 90 : 42), \ + ((char)(imm)&0xF0) ? 107 : (char)(imm) + ((char)(imm)>0x4 ? 91 : 43), \ + ((char)(imm)&0xF0) ? 108 : (char)(imm) + ((char)(imm)>0x3 ? 92 : 44), \ + ((char)(imm)&0xF0) ? 109 : (char)(imm) + ((char)(imm)>0x2 ? 93 : 45), \ + ((char)(imm)&0xF0) ? 110 : (char)(imm) + ((char)(imm)>0x1 ? 94 : 46), \ + ((char)(imm)&0xF0) ? 111 : (char)(imm) + ((char)(imm)>0x0 ? 95 : 47), \ + ((char)(imm)&0xF0) ? 112 : (char)(imm) + ((char)(imm)>0xF ? 96 : 48), \ + ((char)(imm)&0xF0) ? 113 : (char)(imm) + ((char)(imm)>0xE ? 97 : 49), \ + ((char)(imm)&0xF0) ? 114 : (char)(imm) + ((char)(imm)>0xD ? 98 : 50), \ + ((char)(imm)&0xF0) ? 115 : (char)(imm) + ((char)(imm)>0xC ? 99 : 51), \ + ((char)(imm)&0xF0) ? 116 : (char)(imm) + ((char)(imm)>0xB ? 100 : 52), \ + ((char)(imm)&0xF0) ? 117 : (char)(imm) + ((char)(imm)>0xA ? 101 : 53), \ + ((char)(imm)&0xF0) ? 118 : (char)(imm) + ((char)(imm)>0x9 ? 102 : 54), \ + ((char)(imm)&0xF0) ? 119 : (char)(imm) + ((char)(imm)>0x8 ? 103 : 55), \ + ((char)(imm)&0xF0) ? 120 : (char)(imm) + ((char)(imm)>0x7 ? 104 : 56), \ + ((char)(imm)&0xF0) ? 121 : (char)(imm) + ((char)(imm)>0x6 ? 105 : 57), \ + ((char)(imm)&0xF0) ? 122 : (char)(imm) + ((char)(imm)>0x5 ? 106 : 58), \ + ((char)(imm)&0xF0) ? 123 : (char)(imm) + ((char)(imm)>0x4 ? 107 : 59), \ + ((char)(imm)&0xF0) ? 124 : (char)(imm) + ((char)(imm)>0x3 ? 108 : 60), \ + ((char)(imm)&0xF0) ? 125 : (char)(imm) + ((char)(imm)>0x2 ? 109 : 61), \ + ((char)(imm)&0xF0) ? 126 : (char)(imm) + ((char)(imm)>0x1 ? 110 : 62), \ + ((char)(imm)&0xF0) ? 127 : (char)(imm) + ((char)(imm)>0x0 ? 
111 : 63)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, + (__v32hi) __A, + (__v32hi) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, + (__v32hi) __A, + (__v32hi) _mm512_setzero_hi ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, + (__v64qi) __A, + (__v64qi) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, + (__v64qi) __A, + (__v64qi) _mm512_setzero_hi ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A) +{ + return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A, + (__v64qi) __O, + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_set1_epi8 (__mmask64 __M, char __A) +{ + return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A, + (__v64qi) + _mm512_setzero_qi(), + __M); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_kunpackd (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A, + (__mmask64) __B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_kunpackw (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A, + (__mmask32) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P, + (__v32hi) + _mm512_setzero_hi (), + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P, + (__v64qi) __W, + (__mmask64) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P, + (__v64qi) + _mm512_setzero_hi (), + (__mmask64) __U); +} +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A) +{ + __builtin_ia32_storedquhi512_mask ((__v32hi *) __P, + (__v32hi) __A, + (__mmask32) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A) +{ + __builtin_ia32_storedquqi512_mask ((__v64qi *) __P, + (__v64qi) __A, + (__mmask64) __U); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_test_epi8_mask (__m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A, + (__v64qi) __B, __U); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_test_epi16_mask (__m512i __A, __m512i 
__B) +{ + return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A, + (__v32hi) __B, + (__mmask32) -1); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A, + (__v32hi) __B, __U); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_testn_epi8_mask (__m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A, + (__v64qi) __B, __U); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_testn_epi16_mask (__m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A, + (__v32hi) __B, + (__mmask32) -1); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A, + (__v32hi) __B, __U); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_movepi8_mask (__m512i __A) +{ + return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_movepi16_mask (__m512i __A) +{ + return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_movm_epi8 (__mmask64 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2b512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_movm_epi16 (__mmask32 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2w512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastb_epi8 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v16qi) __A, + (__v16qi)_mm_undefined_si128(), + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectb_512(__M, + (__v64qi) _mm512_broadcastb_epi8(__A), + (__v64qi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectb_512(__M, + (__v64qi) _mm512_broadcastb_epi8(__A), + (__v64qi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A) +{ + return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A, + (__v32hi) __O, + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_set1_epi16 (__mmask32 __M, short __A) +{ + return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A, + (__v32hi) _mm512_setzero_hi(), + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastw_epi16 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v8hi) __A, + (__v8hi)_mm_undefined_si128(), + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_broadcastw_epi16(__A), + (__v32hi) 
__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_broadcastw_epi16(__A), + (__v32hi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutexvar_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B, + (__v32hi) __A, + (__v32hi) _mm512_undefined_epi32 (), + (__mmask32) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B, + (__v32hi) __A, + (__v32hi) _mm512_setzero_hi(), + (__mmask32) __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B, + (__v32hi) __A, + (__v32hi) __W, + (__mmask32) __M); +} + +#define _mm512_alignr_epi8(A, B, N) __extension__ ({\ + (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(N), \ + (__v64qi)_mm512_undefined_pd(), \ + (__mmask64)-1); }) + +#define _mm512_mask_alignr_epi8(W, U, A, B, N) __extension__({\ + (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(N), \ + (__v64qi)(__m512i)(W), \ + (__mmask64)(U)); }) + +#define _mm512_maskz_alignr_epi8(U, A, B, N) __extension__({\ + (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(N), \ + (__v64qi)_mm512_setzero_si512(), \ + (__mmask64)(U)); }) + +#define _mm512_dbsad_epu8(A, B, imm) __extension__ ({\ + (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(imm), \ + (__v32hi)_mm512_undefined_epi32(), \ + (__mmask32)-1); }) + +#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) ({\ + (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(imm), \ + (__v32hi)(__m512i)(W), \ + (__mmask32)(U)); }) + +#define _mm512_maskz_dbsad_epu8(U, A, B, imm) ({\ + (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(imm), \ + (__v32hi)_mm512_setzero_hi(), \ + (__mmask32)(U)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sad_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A, + (__v64qi) __B); +} + #undef __DEFAULT_FN_ATTRS diff --git a/c_headers/avx512cdintrin.h b/c_headers/avx512cdintrin.h index 0844b23a1..23c423584 100644 --- a/c_headers/avx512cdintrin.h +++ b/c_headers/avx512cdintrin.h @@ -1,131 +1,144 @@ -/*===------------- avx512cdintrin.h - AVX512CD intrinsics ------------------=== - * - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - *===-----------------------------------------------------------------------=== - */ -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __AVX512CDINTRIN_H -#define __AVX512CDINTRIN_H - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd"))) - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_conflict_epi64 (__m512i __A) -{ - return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, - (__v8di) _mm512_setzero_si512 (), - (__mmask8) -1); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A) -{ - return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, - (__v8di) __W, - (__mmask8) __U); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A) -{ - return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, - (__v8di) _mm512_setzero_si512 (), - (__mmask8) __U); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_conflict_epi32 (__m512i __A) -{ - return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, - (__v16si) _mm512_setzero_si512 (), - (__mmask16) -1); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A) -{ - return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, - (__v16si) __W, - (__mmask16) __U); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A) -{ - return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, - (__v16si) _mm512_setzero_si512 (), - (__mmask16) __U); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_lzcnt_epi32 (__m512i __A) -{ - return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, - (__v16si) _mm512_setzero_si512 (), - (__mmask16) -1); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A) -{ - return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, - (__v16si) __W, - (__mmask16) __U); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A) -{ - return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, - (__v16si) _mm512_setzero_si512 (), - (__mmask16) __U); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_lzcnt_epi64 (__m512i __A) -{ - return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, - (__v8di) _mm512_setzero_si512 (), - (__mmask8) -1); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A) -{ - return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, - (__v8di) __W, - (__mmask8) __U); -} - -static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A) -{ - return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, - (__v8di) 
_mm512_setzero_si512 (), - (__mmask8) __U); -} -#undef __DEFAULT_FN_ATTRS - -#endif +/*===------------- avx512cdintrin.h - AVX512CD intrinsics ------------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512CDINTRIN_H +#define __AVX512CDINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd"))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_conflict_epi64 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_conflict_epi32 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_lzcnt_epi32 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_lzcnt_epi32 
(__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_lzcnt_epi64 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m512i) __builtin_ia32_broadcastmb512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m512i) __builtin_ia32_broadcastmw512 (__A); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/avx512dqintrin.h b/c_headers/avx512dqintrin.h index c946de286..ae44b98a9 100644 --- a/c_headers/avx512dqintrin.h +++ b/c_headers/avx512dqintrin.h @@ -29,214 +29,1304 @@ #define __AVX512DQINTRIN_H /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq"))) static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mullo_epi64 (__m512i __A, __m512i __B) { - return (__m512i) ((__v8di) __A * (__v8di) __B); + return (__m512i) ((__v8du) __A * (__v8du) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) __W, - (__mmask8) __U); +_mm512_mask_mullo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_mullo_epi64(__A, __B), + (__v8di)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) __U); +_mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_mullo_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_xor_pd (__m512d __A, __m512d __B) { - return (__m512d) ((__v8di) __A ^ (__v8di) __B); +_mm512_xor_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A ^ (__v8du)__B); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A, - (__v8df) __B, - (__v8df) __W, - (__mmask8) __U); +_mm512_mask_xor_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_xor_pd(__A, __B), + (__v8df)__W); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A, - (__v8df) 
__B, - (__v8df) - _mm512_setzero_pd (), - (__mmask8) __U); +_mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_xor_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); } static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_xor_ps (__m512 __A, __m512 __B) { - return (__m512) ((__v16si) __A ^ (__v16si) __B); + return (__m512)((__v16su)__A ^ (__v16su)__B); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) __W, - (__mmask16) __U); +_mm512_mask_xor_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_xor_ps(__A, __B), + (__v16sf)__W); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) - _mm512_setzero_ps (), - (__mmask16) __U); +_mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_xor_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_or_pd (__m512d __A, __m512d __B) { - return (__m512d) ((__v8di) __A | (__v8di) __B); +_mm512_or_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A | (__v8du)__B); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A, - (__v8df) __B, - (__v8df) __W, - (__mmask8) __U); +_mm512_mask_or_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_or_pd(__A, __B), + (__v8df)__W); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A, - (__v8df) __B, - (__v8df) - _mm512_setzero_pd (), - (__mmask8) __U); +_mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_or_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_or_ps (__m512 __A, __m512 __B) { - return (__m512) ((__v16si) __A | (__v16si) __B); +_mm512_or_ps(__m512 __A, __m512 __B) { + return (__m512)((__v16su)__A | (__v16su)__B); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) __W, - (__mmask16) __U); +_mm512_mask_or_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_or_ps(__A, __B), + (__v16sf)__W); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) - _mm512_setzero_ps (), - (__mmask16) __U); +_mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_or_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_and_pd (__m512d __A, __m512d __B) { - 
return (__m512d) ((__v8di) __A & (__v8di) __B); +_mm512_and_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A & (__v8du)__B); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A, - (__v8df) __B, - (__v8df) __W, - (__mmask8) __U); +_mm512_mask_and_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_and_pd(__A, __B), + (__v8df)__W); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A, - (__v8df) __B, - (__v8df) - _mm512_setzero_pd (), - (__mmask8) __U); +_mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_and_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_and_ps (__m512 __A, __m512 __B) { - return (__m512) ((__v16si) __A & (__v16si) __B); +_mm512_and_ps(__m512 __A, __m512 __B) { + return (__m512)((__v16su)__A & (__v16su)__B); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) __W, - (__mmask16) __U); +_mm512_mask_and_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_and_ps(__A, __B), + (__v16sf)__W); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) - _mm512_setzero_ps (), - (__mmask16) __U); +_mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_and_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_andnot_pd (__m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, - (__v8df) __B, - (__v8df) - _mm512_setzero_pd (), - (__mmask8) -1); +_mm512_andnot_pd(__m512d __A, __m512d __B) { + return (__m512d)(~(__v8du)__A & (__v8du)__B); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, - (__v8df) __B, - (__v8df) __W, - (__mmask8) __U); +_mm512_mask_andnot_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_andnot_pd(__A, __B), + (__v8df)__W); } static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) { - return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, - (__v8df) __B, - (__v8df) - _mm512_setzero_pd (), - (__mmask8) __U); +_mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_andnot_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_andnot_ps (__m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) - _mm512_setzero_ps (), - (__mmask16) -1); +_mm512_andnot_ps(__m512 __A, __m512 __B) 
{ + return (__m512)(~(__v16su)__A & (__v16su)__B); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) __W, - (__mmask16) __U); +_mm512_mask_andnot_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_andnot_ps(__A, __B), + (__v16sf)__W); } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) { - return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, - (__v16sf) __B, - (__v16sf) - _mm512_setzero_ps (), - (__mmask16) __U); +_mm512_maskz_andnot_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_andnot_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtpd_epi64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epi64(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtpd_epu64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epu64(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + 
(__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtps_epi64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epi64(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundps_epi64(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtps_epu64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epu64(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundps_epu64(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)); }) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_cvtepi64_pd (__m512i __A) { + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) _mm512_setzero_pd(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) { + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) { + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) _mm512_setzero_pd(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi64_pd(A, R) __extension__ ({ \ + 
(__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_cvtepi64_ps (__m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi64_ps(A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvttpd_epi64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epi64(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvttpd_epu64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) 
__builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epu64(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvttps_epi64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundps_epi64(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvttps_epu64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundps_epu64(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) __extension__ ({ \ + 
(__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_cvtepu64_pd (__m512i __A) { + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) _mm512_setzero_pd(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) { + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) { + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) _mm512_setzero_pd(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepu64_pd(A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)); }) + + +#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_cvtepu64_ps (__m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepu64_ps(A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_range_pd(A, B, C) __extension__ ({ \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_range_pd(W, U, A, B, C) __extension__ ({ \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_range_pd(U, A, B, C) __extension__ ({ \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_range_round_pd(A, B, C, R) 
__extension__ ({ \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_range_round_pd(W, U, A, B, C, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_range_round_pd(U, A, B, C, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_range_ps(A, B, C) __extension__ ({ \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_range_ps(W, U, A, B, C) __extension__ ({ \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_range_ps(U, A, B, C) __extension__ ({ \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_range_round_ps(A, B, C, R) __extension__ ({ \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_range_round_ps(W, U, A, B, C, R) __extension__ ({ \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)); }) + +#define _mm512_maskz_range_round_ps(U, A, B, C, R) __extension__ ({ \ + (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm_range_round_ss(A, B, C, R) __extension__ ({ \ + (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8) -1, (int)(C),\ + (int)(R)); }) + +#define _mm_range_ss(A ,B , C) _mm_range_round_ss(A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_range_round_ss(W, U, A, B, C, R) __extension__ ({ \ + (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W),\ + (__mmask8)(U), (int)(C),\ + (int)(R)); }) + +#define _mm_mask_range_ss(W , U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C , _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_range_round_ss(U, A, B, C, R) __extension__ ({ \ + (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C),\ + (int)(R)); }) + +#define _mm_maskz_range_ss(U, A ,B , C) _mm_maskz_range_round_ss(U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_range_round_sd(A, B, C, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8) -1, (int)(C),\ + (int)(R)); }) + +#define _mm_range_sd(A ,B , C) _mm_range_round_sd(A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_range_round_sd(W, U, A, B, C, R) __extension__ ({ \ + 
(__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W),\ + (__mmask8)(U), (int)(C),\ + (int)(R)); }) + +#define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_range_round_sd(U, A, B, C, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C),\ + (int)(R)); }) + +#define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm512_reduce_pd(A, B) __extension__ ({ \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_reduce_pd(W, U, A, B) __extension__ ({ \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_reduce_pd(U, A, B) __extension__ ({ \ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_reduce_ps(A, B) __extension__ ({ \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_reduce_ps(W, U, A, B) __extension__ ({ \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_reduce_ps(U, A, B) __extension__ ({ \ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_reduce_round_pd(A, B, R) __extension__ ({\ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_reduce_round_pd(W, U, A, B, R) __extension__ ({\ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_reduce_round_pd(U, A, B, R) __extension__ ({\ + (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_reduce_round_ps(A, B, R) __extension__ ({\ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_reduce_round_ps(W, U, A, B, R) __extension__ ({\ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_reduce_round_ps(U, A, B, R) __extension__ ({\ + (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm_reduce_ss(A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \ + (int)(C), _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_reduce_ss(W, U, A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + 
(__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(C), _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_maskz_reduce_ss(U, A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_reduce_round_ss(A, B, C, R) __extension__ ({ \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \ + (int)(C), (int)(R)); }) + +#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) __extension__ ({ \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(C), (int)(R)); }) + +#define _mm_maskz_reduce_round_ss(U, A, B, C, R) __extension__ ({ \ + (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C), (int)(R)); }) + +#define _mm_reduce_sd(A, B, C) __extension__ ({ \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(C), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_reduce_sd(W, U, A, B, C) __extension__ ({ \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), (__mmask8)(U), \ + (int)(C), _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_maskz_reduce_sd(U, A, B, C) __extension__ ({ \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_reduce_round_sd(A, B, C, R) __extension__ ({ \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(C), (int)(R)); }) + +#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) __extension__ ({ \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), (__mmask8)(U), \ + (int)(C), (int)(R)); }) + +#define _mm_maskz_reduce_round_sd(U, A, B, C, R) __extension__ ({ \ + (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C), (int)(R)); }) + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_movepi32_mask (__m512i __A) +{ + return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_movm_epi32 (__mmask16 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2d512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_movm_epi64 (__mmask8 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2q512 (__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm512_movepi64_mask (__m512i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A); +} + + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_broadcast_f32x2 (__m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A, + (__v16sf)_mm512_undefined_ps(), + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A, + (__v16sf) + __O, __M); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A) +{ + return (__m512) 
__builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A, + (__v16sf)_mm512_setzero_ps (), + __M); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_broadcast_f32x8 (__m256 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A, + _mm512_undefined_ps(), + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_f32x8 (__m512 __O, __mmask16 __M, __m256 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A, + (__v16sf)__O, + __M); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_f32x8 (__mmask16 __M, __m256 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A, + (__v16sf)_mm512_setzero_ps (), + __M); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_broadcast_f64x2 (__m128d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A, + (__v8df)_mm512_undefined_pd(), + (__mmask8) -1); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_f64x2 (__m512d __O, __mmask8 __M, __m128d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A, + (__v8df) + __O, __M); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A, + (__v8df)_mm512_setzero_ps (), + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcast_i32x2 (__m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A, + (__v16si)_mm512_setzero_si512(), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A, + (__v16si) + __O, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A, + (__v16si)_mm512_setzero_si512 (), + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcast_i32x8 (__m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A, + (__v16si)_mm512_setzero_si512(), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_i32x8 (__m512i __O, __mmask16 __M, __m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A, + (__v16si)__O, + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_i32x8 (__mmask16 __M, __m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcast_i64x2 (__m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A, + (__v8di)_mm512_setzero_si512(), + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_i64x2 (__m512i __O, __mmask8 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A, + (__v8di) + __O, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A, + (__v8di)_mm512_setzero_si512 (), + __M); +} + +#define _mm512_extractf32x8_ps(A, imm) __extension__ ({ \ + (__m256)__builtin_shufflevector((__v16sf)(__m512)(A), \ + 
(__v16sf)_mm512_undefined_ps(), \ + ((imm) & 1) ? 8 : 0, \ + ((imm) & 1) ? 9 : 1, \ + ((imm) & 1) ? 10 : 2, \ + ((imm) & 1) ? 11 : 3, \ + ((imm) & 1) ? 12 : 4, \ + ((imm) & 1) ? 13 : 5, \ + ((imm) & 1) ? 14 : 6, \ + ((imm) & 1) ? 15 : 7); }) + +#define _mm512_mask_extractf32x8_ps(W, U, A, imm) __extension__ ({ \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm512_extractf32x8_ps((A), (imm)), \ + (__v8sf)(W)); }) + +#define _mm512_maskz_extractf32x8_ps(U, A, imm) __extension__ ({ \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm512_extractf32x8_ps((A), (imm)), \ + (__v8sf)_mm256_setzero_ps()); }) + +#define _mm512_extractf64x2_pd(A, imm) __extension__ ({ \ + (__m128d)__builtin_shufflevector((__v8df)(__m512d)(A), \ + (__v8df)_mm512_undefined_pd(), \ + 0 + ((imm) & 0x3) * 2, \ + 1 + ((imm) & 0x3) * 2); }) + +#define _mm512_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm512_extractf64x2_pd((A), (imm)), \ + (__v2df)(W)); }) + +#define _mm512_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm512_extractf64x2_pd((A), (imm)), \ + (__v2df)_mm_setzero_pd()); }) + +#define _mm512_extracti32x8_epi32(A, imm) __extension__ ({ \ + (__m256i)__builtin_shufflevector((__v16si)(__m512i)(A), \ + (__v16si)_mm512_undefined_epi32(), \ + ((imm) & 1) ? 8 : 0, \ + ((imm) & 1) ? 9 : 1, \ + ((imm) & 1) ? 10 : 2, \ + ((imm) & 1) ? 11 : 3, \ + ((imm) & 1) ? 12 : 4, \ + ((imm) & 1) ? 13 : 5, \ + ((imm) & 1) ? 14 : 6, \ + ((imm) & 1) ? 15 : 7); }) + +#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm512_extracti32x8_epi32((A), (imm)), \ + (__v8si)(W)); }) + +#define _mm512_maskz_extracti32x8_epi32(U, A, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm512_extracti32x8_epi32((A), (imm)), \ + (__v8si)_mm256_setzero_si256()); }) + +#define _mm512_extracti64x2_epi64(A, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector((__v8di)(__m512i)(A), \ + (__v8di)_mm512_undefined_epi32(), \ + 0 + ((imm) & 0x3) * 2, \ + 1 + ((imm) & 0x3) * 2); }) + +#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm512_extracti64x2_epi64((A), (imm)), \ + (__v2di)(W)); }) + +#define _mm512_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm512_extracti64x2_epi64((A), (imm)), \ + (__v2di)_mm_setzero_di()); }) + +#define _mm512_insertf32x8(A, B, imm) __extension__ ({ \ + (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_castps256_ps512((__m256)(B)),\ + ((imm) & 0x1) ? 0 : 16, \ + ((imm) & 0x1) ? 1 : 17, \ + ((imm) & 0x1) ? 2 : 18, \ + ((imm) & 0x1) ? 3 : 19, \ + ((imm) & 0x1) ? 4 : 20, \ + ((imm) & 0x1) ? 5 : 21, \ + ((imm) & 0x1) ? 6 : 22, \ + ((imm) & 0x1) ? 7 : 23, \ + ((imm) & 0x1) ? 16 : 8, \ + ((imm) & 0x1) ? 17 : 9, \ + ((imm) & 0x1) ? 18 : 10, \ + ((imm) & 0x1) ? 19 : 11, \ + ((imm) & 0x1) ? 20 : 12, \ + ((imm) & 0x1) ? 21 : 13, \ + ((imm) & 0x1) ? 22 : 14, \ + ((imm) & 0x1) ? 
23 : 15); }) + +#define _mm512_mask_insertf32x8(W, U, A, B, imm) __extension__ ({ \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \ + (__v16sf)(W)); }) + +#define _mm512_maskz_insertf32x8(U, A, B, imm) __extension__ ({ \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \ + (__v16sf)_mm512_setzero_ps()); }) + +#define _mm512_insertf64x2(A, B, imm) __extension__ ({ \ + (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \ + (__v8df)_mm512_castpd128_pd512((__m128d)(B)),\ + (((imm) & 0x3) == 0) ? 8 : 0, \ + (((imm) & 0x3) == 0) ? 9 : 1, \ + (((imm) & 0x3) == 1) ? 8 : 2, \ + (((imm) & 0x3) == 1) ? 9 : 3, \ + (((imm) & 0x3) == 2) ? 8 : 4, \ + (((imm) & 0x3) == 2) ? 9 : 5, \ + (((imm) & 0x3) == 3) ? 8 : 6, \ + (((imm) & 0x3) == 3) ? 9 : 7); }) + +#define _mm512_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x2((A), (B), (imm)), \ + (__v8df)(W)); }) + +#define _mm512_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x2((A), (B), (imm)), \ + (__v8df)_mm512_setzero_pd()); }) + +#define _mm512_inserti32x8(A, B, imm) __extension__ ({ \ + (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \ + (__v16si)_mm512_castsi256_si512((__m256i)(B)),\ + ((imm) & 0x1) ? 0 : 16, \ + ((imm) & 0x1) ? 1 : 17, \ + ((imm) & 0x1) ? 2 : 18, \ + ((imm) & 0x1) ? 3 : 19, \ + ((imm) & 0x1) ? 4 : 20, \ + ((imm) & 0x1) ? 5 : 21, \ + ((imm) & 0x1) ? 6 : 22, \ + ((imm) & 0x1) ? 7 : 23, \ + ((imm) & 0x1) ? 16 : 8, \ + ((imm) & 0x1) ? 17 : 9, \ + ((imm) & 0x1) ? 18 : 10, \ + ((imm) & 0x1) ? 19 : 11, \ + ((imm) & 0x1) ? 20 : 12, \ + ((imm) & 0x1) ? 21 : 13, \ + ((imm) & 0x1) ? 22 : 14, \ + ((imm) & 0x1) ? 23 : 15); }) + +#define _mm512_mask_inserti32x8(W, U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x8((A), (B), (imm)), \ + (__v16si)(W)); }) + +#define _mm512_maskz_inserti32x8(U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x8((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512()); }) + +#define _mm512_inserti64x2(A, B, imm) __extension__ ({ \ + (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \ + (__v8di)_mm512_castsi128_si512((__m128i)(B)),\ + (((imm) & 0x3) == 0) ? 8 : 0, \ + (((imm) & 0x3) == 0) ? 9 : 1, \ + (((imm) & 0x3) == 1) ? 8 : 2, \ + (((imm) & 0x3) == 1) ? 9 : 3, \ + (((imm) & 0x3) == 2) ? 8 : 4, \ + (((imm) & 0x3) == 2) ? 9 : 5, \ + (((imm) & 0x3) == 3) ? 8 : 6, \ + (((imm) & 0x3) == 3) ? 
9 : 7); }) + +#define _mm512_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x2((A), (B), (imm)), \ + (__v8di)(W)); }) + +#define _mm512_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x2((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512()); }) + +#define _mm512_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \ + (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \ + (int)(imm), (__mmask16)(U)); }) + +#define _mm512_fpclass_ps_mask(A, imm) __extension__ ({ \ + (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \ + (int)(imm), (__mmask16)-1); }) + +#define _mm512_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm512_fpclass_pd_mask(A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm_fpclass_sd_mask(A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm_mask_fpclass_sd_mask(U, A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm_fpclass_ss_mask(A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm_mask_fpclass_ss_mask(U, A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)(U)); }) + #undef __DEFAULT_FN_ATTRS #endif diff --git a/c_headers/avx512erintrin.h b/c_headers/avx512erintrin.h index 56edffc11..8ff212c42 100644 --- a/c_headers/avx512erintrin.h +++ b/c_headers/avx512erintrin.h @@ -1,4 +1,4 @@ -/*===---- avx512fintrin.h - AVX2 intrinsics -----------------------------------=== +/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------=== * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -31,66 +31,66 @@ #define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (R)); }) + (__mmask8)-1, (int)(R)); }) #define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), \ - (__mmask8)(M), (R)); }) + (__v8df)(__m512d)(S), (__mmask8)(M), \ + (int)(R)); }) #define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \ (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (R)); }) + (__mmask8)(M), (int)(R)); }) #define _mm512_exp2a23_pd(A) \ - _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION) + _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION) #define _mm512_mask_exp2a23_pd(S, M, A) \ - _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) #define _mm512_maskz_exp2a23_pd(M, A) \ - _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) + _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) #define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \ 
(__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ (__v16sf)_mm512_setzero_ps(), \ - (__mmask8)-1, (R)); }) + (__mmask16)-1, (int)(R)); }) #define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \ (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), \ - (__mmask8)(M), (R)); }) + (__v16sf)(__m512)(S), (__mmask16)(M), \ + (int)(R)); }) #define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \ (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ (__v16sf)_mm512_setzero_ps(), \ - (__mmask8)(M), (R)); }) + (__mmask16)(M), (int)(R)); }) #define _mm512_exp2a23_ps(A) \ - _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION) + _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION) #define _mm512_mask_exp2a23_ps(S, M, A) \ - _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) #define _mm512_maskz_exp2a23_ps(M, A) \ - _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) + _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) // rsqrt28 #define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (R)); }) + (__mmask8)-1, (int)(R)); }) #define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), \ - (__mmask8)(M), (R)); }) + (__v8df)(__m512d)(S), (__mmask8)(M), \ + (int)(R)); }) #define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \ (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (R)); }) + (__mmask8)(M), (int)(R)); }) #define _mm512_rsqrt28_pd(A) \ _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION) @@ -104,17 +104,17 @@ #define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (R)); }) + (__mmask16)-1, (int)(R)); }) #define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), \ - (__mmask16)(M), (R)); }) + (__v16sf)(__m512)(S), (__mmask16)(M), \ + (int)(R)); }) #define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \ (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)(M), (R)); }) + (__mmask16)(M), (int)(R)); }) #define _mm512_rsqrt28_ps(A) \ _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION) @@ -126,22 +126,22 @@ _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) #define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \ - (__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)-1, (R)); }) + (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) #define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \ - (__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)(__m128)(S), \ - (__mmask8)(M), (R)); }) + (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(S), \ + (__mmask8)(M), (int)(R)); }) #define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \ - 
(__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)(M), (R)); }) + (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(M), (int)(R)); }) #define _mm_rsqrt28_ss(A, B) \ _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) @@ -153,22 +153,22 @@ _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) #define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \ - (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)-1, (R)); }) + (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) #define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \ - (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)(__m128d)(S), \ - (__mmask8)(M), (R)); }) + (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(S), \ + (__mmask8)(M), (int)(R)); }) #define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \ - (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)(M), (R)); }) + (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(M), (int)(R)); }) #define _mm_rsqrt28_sd(A, B) \ _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) @@ -177,23 +177,23 @@ _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) #define _mm_maskz_rsqrt28_sd(M, A, B) \ - _mm_mask_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) + _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) // rcp28 #define _mm512_rcp28_round_pd(A, R) __extension__ ({ \ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (R)); }) + (__mmask8)-1, (int)(R)); }) #define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), \ - (__mmask8)(M), (R)); }) + (__v8df)(__m512d)(S), (__mmask8)(M), \ + (int)(R)); }) #define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \ (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (R)); }) + (__mmask8)(M), (int)(R)); }) #define _mm512_rcp28_pd(A) \ _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION) @@ -207,17 +207,17 @@ #define _mm512_rcp28_round_ps(A, R) __extension__ ({ \ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (R)); }) + (__mmask16)-1, (int)(R)); }) #define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), \ - (__mmask16)(M), (R)); }) + (__v16sf)(__m512)(S), (__mmask16)(M), \ + (int)(R)); }) #define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \ (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)(M), (R)); }) + (__mmask16)(M), (int)(R)); }) #define _mm512_rcp28_ps(A) \ _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION) @@ -229,22 +229,22 @@ _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) #define _mm_rcp28_round_ss(A, B, 
R) __extension__ ({ \ - (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)-1, (R)); }) + (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) #define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \ - (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)(__m128)(S), \ - (__mmask8)(M), (R)); }) + (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(S), \ + (__mmask8)(M), (int)(R)); }) #define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \ - (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)(M), (R)); }) + (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(M), (int)(R)); }) #define _mm_rcp28_ss(A, B) \ _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) @@ -256,22 +256,22 @@ _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) #define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \ - (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)-1, (R)); }) + (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) #define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \ - (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)(__m128d)(S), \ - (__mmask8)(M), (R)); }) + (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(S), \ + (__mmask8)(M), (int)(R)); }) #define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \ - (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)(M), (R)); }) + (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(M), (int)(R)); }) #define _mm_rcp28_sd(A, B) \ _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) diff --git a/c_headers/avx512fintrin.h b/c_headers/avx512fintrin.h index 4f7cba0b1..e6a7217c8 100644 --- a/c_headers/avx512fintrin.h +++ b/c_headers/avx512fintrin.h @@ -1,4 +1,4 @@ -/*===---- avx512fintrin.h - AVX2 intrinsics --------------------------------=== +/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------=== * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -27,11 +27,19 @@ #ifndef __AVX512FINTRIN_H #define __AVX512FINTRIN_H +typedef char __v64qi __attribute__((__vector_size__(64))); +typedef short __v32hi __attribute__((__vector_size__(64))); typedef double __v8df __attribute__((__vector_size__(64))); typedef float __v16sf __attribute__((__vector_size__(64))); typedef long long __v8di __attribute__((__vector_size__(64))); typedef int __v16si __attribute__((__vector_size__(64))); +/* Unsigned types */ +typedef unsigned char __v64qu __attribute__((__vector_size__(64))); +typedef unsigned short __v32hu __attribute__((__vector_size__(64))); +typedef unsigned long long __v8du __attribute__((__vector_size__(64))); +typedef unsigned int 
__v16su __attribute__((__vector_size__(64))); + typedef float __m512 __attribute__((__vector_size__(64))); typedef double __m512d __attribute__((__vector_size__(64))); typedef long long __m512i __attribute__((__vector_size__(64))); @@ -46,8 +54,126 @@ typedef unsigned short __mmask16; #define _MM_FROUND_TO_ZERO 0x03 #define _MM_FROUND_CUR_DIRECTION 0x04 +/* Constants for integer comparison predicates */ +typedef enum { + _MM_CMPINT_EQ, /* Equal */ + _MM_CMPINT_LT, /* Less than */ + _MM_CMPINT_LE, /* Less than or Equal */ + _MM_CMPINT_UNUSED, + _MM_CMPINT_NE, /* Not Equal */ + _MM_CMPINT_NLT, /* Not Less than */ +#define _MM_CMPINT_GE _MM_CMPINT_NLT /* Greater than or Equal */ + _MM_CMPINT_NLE /* Not Less than or Equal */ +#define _MM_CMPINT_GT _MM_CMPINT_NLE /* Greater than */ +} _MM_CMPINT_ENUM; + +typedef enum +{ + _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02, + _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05, + _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08, + _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B, + _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E, + _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11, + _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14, + _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17, + _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A, + _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D, + _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20, + _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23, + _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26, + _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29, + _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C, + _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F, + _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32, + _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35, + _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38, + _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B, + _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E, + _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41, + _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44, + _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47, + _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A, + _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D, + _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50, + _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53, + _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56, + _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59, + _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C, + _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F, + _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62, + _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65, + _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68, + _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B, + _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E, + _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71, + _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74, + _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77, + 
_MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A, + _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D, + _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80, + _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83, + _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86, + _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89, + _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C, + _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F, + _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92, + _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95, + _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98, + _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B, + _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E, + _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1, + _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4, + _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7, + _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA, + _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD, + _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0, + _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3, + _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6, + _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9, + _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC, + _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF, + _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2, + _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5, + _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8, + _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB, + _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE, + _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1, + _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4, + _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7, + _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA, + _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD, + _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0, + _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3, + _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6, + _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9, + _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC, + _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF, + _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2, + _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5, + _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8, + _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB, + _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE, + _MM_PERM_DDDD = 0xFF +} _MM_PERM_ENUM; + +typedef enum +{ + _MM_MANT_NORM_1_2, /* interval [1, 2) */ + _MM_MANT_NORM_p5_2, /* interval [0.5, 2) */ + _MM_MANT_NORM_p5_1, /* interval [0.5, 1) */ + _MM_MANT_NORM_p75_1p5 /* interval [0.75, 1.5) */ +} _MM_MANTISSA_NORM_ENUM; + +typedef enum +{ + _MM_MANT_SIGN_src, /* sign = sign(SRC) */ + _MM_MANT_SIGN_zero, /* sign = 0 */ + _MM_MANT_SIGN_nan /* DEST = NaN if sign(SRC) = 1 */ +} _MM_MANTISSA_SIGN_ENUM; + /* Define the default attributes for the 
functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f"))) /* Create vectors with repeated elements */ @@ -57,6 +183,81 @@ _mm512_setzero_si512(void) return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 }; } +#define _mm512_setzero_epi32 _mm512_setzero_si512 + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_undefined_pd(void) +{ + return (__m512d)__builtin_ia32_undef512(); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_undefined(void) +{ + return (__m512)__builtin_ia32_undef512(); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_undefined_ps(void) +{ + return (__m512)__builtin_ia32_undef512(); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_undefined_epi32(void) +{ + return (__m512i)__builtin_ia32_undef512(); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastd_epi32 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v4si) __A, + (__v4si)_mm_undefined_si128(), + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__M, + (__v16si) _mm512_broadcastd_epi32(__A), + (__v16si) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__M, + (__v16si) _mm512_broadcastd_epi32(__A), + (__v16si) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastq_epi64 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v2di) __A, + (__v2di) _mm_undefined_si128(), + 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di) _mm512_broadcastq_epi64(__A), + (__v8di) __O); + +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di) _mm512_broadcastq_epi64(__A), + (__v8di) _mm512_setzero_si512()); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_set1_epi32(__mmask16 __M, int __A) { @@ -88,6 +289,9 @@ _mm512_setzero_ps(void) return (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; } + +#define _mm512_setzero _mm512_setzero_ps + static __inline __m512d __DEFAULT_FN_ATTRS _mm512_setzero_pd(void) { @@ -107,6 +311,28 @@ _mm512_set1_pd(double __w) return (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w }; } +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_set1_epi8(char __w) +{ + return (__m512i)(__v64qi){ __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_set1_epi16(short __w) +{ + return (__m512i)(__v32hi){ __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_set1_epi32(int __s) { 
@@ -121,21 +347,62 @@ _mm512_set1_epi64(long long __d) } static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_broadcastss_ps(__m128 __X) +_mm512_broadcastss_ps(__m128 __A) { - float __f = __X[0]; - return (__v16sf){ __f, __f, __f, __f, - __f, __f, __f, __f, - __f, __f, __f, __f, - __f, __f, __f, __f }; + return (__m512)__builtin_shufflevector((__v4sf) __A, + (__v4sf)_mm_undefined_ps(), + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); } -static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_broadcastsd_pd(__m128d __X) +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_set4_epi32 (int __A, int __B, int __C, int __D) { - double __d = __X[0]; - return (__v8df){ __d, __d, __d, __d, - __d, __d, __d, __d }; + return (__m512i)(__v16si) + { __D, __C, __B, __A, __D, __C, __B, __A, + __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_set4_epi64 (long long __A, long long __B, long long __C, + long long __D) +{ + return (__m512i) (__v8di) + { __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512d __DEFAULT_FN_ATTRS +_mm512_set4_pd (double __A, double __B, double __C, double __D) +{ + return (__m512d) + { __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512 __DEFAULT_FN_ATTRS +_mm512_set4_ps (float __A, float __B, float __C, float __D) +{ + return (__m512) + { __D, __C, __B, __A, __D, __C, __B, __A, + __D, __C, __B, __A, __D, __C, __B, __A }; +} + +#define _mm512_setr4_epi32(e0,e1,e2,e3) \ + _mm512_set4_epi32((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_epi64(e0,e1,e2,e3) \ + _mm512_set4_epi64((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_pd(e0,e1,e2,e3) \ + _mm512_set4_pd((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_ps(e0,e1,e2,e3) \ + _mm512_set4_ps((e3),(e2),(e1),(e0)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_broadcastsd_pd(__m128d __A) +{ + return (__m512d)__builtin_shufflevector((__v2df) __A, + (__v2df) _mm_undefined_pd(), + 0, 0, 0, 0, 0, 0, 0, 0); } /* Cast between vector types */ @@ -159,368 +426,441 @@ _mm512_castpd512_pd128(__m512d __a) return __builtin_shufflevector(__a, __a, 0, 1); } +static __inline __m256d __DEFAULT_FN_ATTRS +_mm512_castpd512_pd256 (__m512d __A) +{ + return __builtin_shufflevector(__A, __A, 0, 1, 2, 3); +} + static __inline __m128 __DEFAULT_FN_ATTRS _mm512_castps512_ps128(__m512 __a) { return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); } +static __inline __m256 __DEFAULT_FN_ATTRS +_mm512_castps512_ps256 (__m512 __A) +{ + return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline __m512 __DEFAULT_FN_ATTRS +_mm512_castpd_ps (__m512d __A) +{ + return (__m512) (__A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_castpd_si512 (__m512d __A) +{ + return (__m512i) (__A); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_castpd128_pd512 (__m128d __A) +{ + return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1); +} + +static __inline __m512d __DEFAULT_FN_ATTRS +_mm512_castps_pd (__m512 __A) +{ + return (__m512d) (__A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_castps_si512 (__m512 __A) +{ + return (__m512i) (__A); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_castps128_ps512 (__m128 __A) +{ + return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_castsi128_si512 (__m128i __A) +{ + return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1); +} + +static __inline__ __m512i 
__DEFAULT_FN_ATTRS +_mm512_castsi256_si512 (__m256i __A) +{ + return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1); +} + +static __inline __m512 __DEFAULT_FN_ATTRS +_mm512_castsi512_ps (__m512i __A) +{ + return (__m512) (__A); +} + +static __inline __m512d __DEFAULT_FN_ATTRS +_mm512_castsi512_pd (__m512i __A) +{ + return (__m512d) (__A); +} + +static __inline __m128i __DEFAULT_FN_ATTRS +_mm512_castsi512_si128 (__m512i __A) +{ + return (__m128i)__builtin_shufflevector(__A, __A , 0, 1); +} + +static __inline __m256i __DEFAULT_FN_ATTRS +_mm512_castsi512_si256 (__m512i __A) +{ + return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_int2mask(int __a) +{ + return (__mmask16)__a; +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask2int(__mmask16 __a) +{ + return (int)__a; +} + /* Bitwise operators */ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_and_epi32(__m512i __a, __m512i __b) { - return __a & __b; + return (__m512i)((__v16su)__a & (__v16su)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a, - (__v16si) __b, - (__v16si) __src, - (__mmask16) __k); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + (__v16si) _mm512_and_epi32(__a, __b), + (__v16si) __src); } + static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a, - (__v16si) __b, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) __k); + return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (), + __k, __a, __b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_and_epi64(__m512i __a, __m512i __b) { - return __a & __b; + return (__m512i)((__v8du)__a & (__v8du)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a, - (__v8di) __b, - (__v8di) __src, - (__mmask8) __k); + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k, + (__v8di) _mm512_and_epi64(__a, __b), + (__v8di) __src); } + static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a, - (__v8di) __b, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) __k); + return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (), + __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_andnot_si512 (__m512i __A, __m512i __B) +{ + return (__m512i)(~(__v8du)(__A) & (__v8du)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_andnot_epi32 (__m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) -1); + return (__m512i)(~(__v16su)(__A) & (__v16su)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_andnot_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) __W, - (__mmask16) __U); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_andnot_epi32(__A, __B), + (__v16si)__W); } static __inline__ __m512i 
__DEFAULT_FN_ATTRS -_mm512_maskz_andnot_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) __U); + return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(), + __U, __A, __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_andnot_epi64 (__m512i __A, __m512i __B) +_mm512_andnot_epi64(__m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) -1); + return (__m512i)(~(__v8du)(__A) & (__v8du)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_andnot_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) __W, __U); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_andnot_epi64(__A, __B), + (__v8di)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_andnot_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) - _mm512_setzero_pd (), - __U); + return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(), + __U, __A, __B); } + static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_or_epi32(__m512i __a, __m512i __b) { - return __a | __b; + return (__m512i)((__v16su)__a | (__v16su)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a, - (__v16si) __b, - (__v16si) __src, - (__mmask16) __k); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + (__v16si)_mm512_or_epi32(__a, __b), + (__v16si)__src); } + static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a, - (__v16si) __b, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) __k); + return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_or_epi64(__m512i __a, __m512i __b) { - return __a | __b; + return (__m512i)((__v8du)__a | (__v8du)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a, - (__v8di) __b, - (__v8di) __src, - (__mmask8) __k); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k, + (__v8di)_mm512_or_epi64(__a, __b), + (__v8di)__src); } + static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a, - (__v8di) __b, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) __k); + return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_xor_epi32(__m512i __a, __m512i __b) { - return __a ^ __b; + return (__m512i)((__v16su)__a ^ (__v16su)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) { - return (__m512i) 
__builtin_ia32_pxord512_mask((__v16si) __a, - (__v16si) __b, - (__v16si) __src, - (__mmask16) __k); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + (__v16si)_mm512_xor_epi32(__a, __b), + (__v16si)__src); } + static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a, - (__v16si) __b, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) __k); + return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_xor_epi64(__m512i __a, __m512i __b) { - return __a ^ __b; + return (__m512i)((__v8du)__a ^ (__v8du)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a, - (__v8di) __b, - (__v8di) __src, - (__mmask8) __k); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k, + (__v8di)_mm512_xor_epi64(__a, __b), + (__v8di)__src); } + static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b) { - return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a, - (__v8di) __b, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) __k); + return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_and_si512(__m512i __a, __m512i __b) { - return __a & __b; + return (__m512i)((__v8du)__a & (__v8du)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_or_si512(__m512i __a, __m512i __b) { - return __a | __b; + return (__m512i)((__v8du)__a | (__v8du)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_xor_si512(__m512i __a, __m512i __b) { - return __a ^ __b; + return (__m512i)((__v8du)__a ^ (__v8du)__b); } + /* Arithmetic */ static __inline __m512d __DEFAULT_FN_ATTRS _mm512_add_pd(__m512d __a, __m512d __b) { - return __a + __b; + return (__m512d)((__v8df)__a + (__v8df)__b); } static __inline __m512 __DEFAULT_FN_ATTRS _mm512_add_ps(__m512 __a, __m512 __b) { - return __a + __b; + return (__m512)((__v16sf)__a + (__v16sf)__b); } static __inline __m512d __DEFAULT_FN_ATTRS _mm512_mul_pd(__m512d __a, __m512d __b) { - return __a * __b; + return (__m512d)((__v8df)__a * (__v8df)__b); } static __inline __m512 __DEFAULT_FN_ATTRS _mm512_mul_ps(__m512 __a, __m512 __b) { - return __a * __b; + return (__m512)((__v16sf)__a * (__v16sf)__b); } static __inline __m512d __DEFAULT_FN_ATTRS _mm512_sub_pd(__m512d __a, __m512d __b) { - return __a - __b; + return (__m512d)((__v8df)__a - (__v8df)__b); } static __inline __m512 __DEFAULT_FN_ATTRS _mm512_sub_ps(__m512 __a, __m512 __b) { - return __a - __b; + return (__m512)((__v16sf)__a - (__v16sf)__b); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_add_epi64 (__m512i __A, __m512i __B) { - return (__m512i) ((__v8di) __A + (__v8di) __B); + return (__m512i) ((__v8du) __A + (__v8du) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_add_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) __W, - (__mmask8) __U); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_add_epi64(__A, __B), + (__v8di)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_add_epi64 (__mmask8 __U, __m512i __A, __m512i __B) 
+_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) __U); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_add_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sub_epi64 (__m512i __A, __m512i __B) { - return (__m512i) ((__v8di) __A - (__v8di) __B); + return (__m512i) ((__v8du) __A - (__v8du) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_sub_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) __W, - (__mmask8) __U); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sub_epi64(__A, __B), + (__v8di)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_sub_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A, - (__v8di) __B, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) __U); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sub_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_add_epi32 (__m512i __A, __m512i __B) { - return (__m512i) ((__v16si) __A + (__v16si) __B); + return (__m512i) ((__v16su) __A + (__v16su) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_add_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) __W, - (__mmask16) __U); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_add_epi32(__A, __B), + (__v16si)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) __U); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_add_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_sub_epi32 (__m512i __A, __m512i __B) { - return (__m512i) ((__v16si) __A - (__v16si) __B); + return (__m512i) ((__v16su) __A - (__v16su) __B); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_mask_sub_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) __W, - (__mmask16) __U); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sub_epi32(__A, __B), + (__v16si)__W); } static __inline__ __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_sub_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) __U); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sub_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); } +#define 
_mm512_mask_max_round_pd(W, U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_max_round_pd(U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_max_round_pd(A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) + static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_max_pd(__m512d __A, __m512d __B) { @@ -532,6 +872,45 @@ _mm512_max_pd(__m512d __A, __m512d __B) _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_max_round_ps(W, U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)); }) + +#define _mm512_maskz_max_round_ps(U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_max_round_ps(A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)); }) + static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_max_ps(__m512 __A, __m512 __B) { @@ -543,6 +922,99 @@ _mm512_max_ps(__m512 __A, __m512 __B) _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_max_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_max_round_ss(W, U, A, B, R) __extension__ ({ \ + 
(__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm_maskz_max_round_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_max_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_max_round_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_max_round_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_max_epi32(__m512i __A, __m512i __B) @@ -554,6 +1026,24 @@ _mm512_max_epi32(__m512i __A, __m512i __B) (__mmask16) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_max_epu32(__m512i __A, __m512i __B) { @@ -564,6 +1054,24 @@ _mm512_max_epu32(__m512i __A, __m512i __B) (__mmask16) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_max_epi64(__m512i __A, __m512i __B) { @@ -574,6 +1082,24 @@ _mm512_max_epi64(__m512i __A, __m512i __B) (__mmask8) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + static __inline __m512i __DEFAULT_FN_ATTRS 
_mm512_max_epu64(__m512i __A, __m512i __B) { @@ -584,6 +1110,42 @@ _mm512_max_epu64(__m512i __A, __m512i __B) (__mmask8) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +#define _mm512_mask_min_round_pd(W, U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_min_round_pd(U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_min_round_pd(A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) + static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_min_pd(__m512d __A, __m512d __B) { @@ -595,6 +1157,45 @@ _mm512_min_pd(__m512d __A, __m512d __B) _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_min_round_ps(W, U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)); }) + +#define _mm512_maskz_min_round_ps(U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_min_round_ps(A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_min_ps(__m512 __A, __m512 __B) { @@ -606,6 +1207,99 @@ _mm512_min_ps(__m512 __A, __m512 __B) _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A, + (__v4sf) __B, 
+ (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_min_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_min_round_ss(W, U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm_maskz_min_round_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_min_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_min_round_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_min_round_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_min_epi32(__m512i __A, __m512i __B) @@ -617,6 +1311,24 @@ _mm512_min_epi32(__m512i __A, __m512i __B) (__mmask16) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_min_epu32(__m512i __A, __m512i __B) { @@ -627,6 +1339,24 @@ _mm512_min_epu32(__m512i __A, __m512i __B) (__mmask16) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_min_epi64(__m512i __A, __m512i __B) { @@ 
-637,6 +1367,24 @@ _mm512_min_epi64(__m512i __A, __m512i __B) (__mmask8) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_min_epu64(__m512i __A, __m512i __B) { @@ -647,104 +1395,175 @@ _mm512_min_epu64(__m512i __A, __m512i __B) (__mmask8) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mul_epi32(__m512i __X, __m512i __Y) { - return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X, - (__v16si) __Y, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) -1); + return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y); } static __inline __m512i __DEFAULT_FN_ATTRS -_mm512_mask_mul_epi32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) { - return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X, - (__v16si) __Y, - (__v8di) __W, __M); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epi32(__X, __Y), + (__v8di)__W); } static __inline __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_mul_epi32 (__mmask8 __M, __m512i __X, __m512i __Y) +_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y) { - return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X, - (__v16si) __Y, - (__v8di) - _mm512_setzero_si512 (), - __M); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epi32(__X, __Y), + (__v8di)_mm512_setzero_si512 ()); } static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mul_epu32(__m512i __X, __m512i __Y) { - return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, - (__v16si) __Y, - (__v8di) - _mm512_setzero_si512 (), - (__mmask8) -1); + return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y); } static __inline __m512i __DEFAULT_FN_ATTRS -_mm512_mask_mul_epu32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) { - return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, - (__v16si) __Y, - (__v8di) __W, __M); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epu32(__X, __Y), + (__v8di)__W); } static __inline __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_mul_epu32 (__mmask8 __M, __m512i __X, __m512i __Y) +_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y) { - return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, - (__v16si) __Y, - (__v8di) - _mm512_setzero_si512 (), - __M); + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epu32(__X, __Y), + (__v8di)_mm512_setzero_si512 ()); } static __inline __m512i 
__DEFAULT_FN_ATTRS _mm512_mullo_epi32 (__m512i __A, __m512i __B) { - return (__m512i) ((__v16si) __A * (__v16si) __B); + return (__m512i) ((__v16su) __A * (__v16su) __B); } static __inline __m512i __DEFAULT_FN_ATTRS -_mm512_maskz_mullo_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) - _mm512_setzero_si512 (), - __M); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_mullo_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); } static __inline __m512i __DEFAULT_FN_ATTRS -_mm512_mask_mullo_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) { - return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A, - (__v16si) __B, - (__v16si) __W, __M); + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_mullo_epi32(__A, __B), + (__v16si)__W); } +#define _mm512_mask_sqrt_round_pd(W, U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_sqrt_round_pd(U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_sqrt_round_pd(A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) + static __inline__ __m512d __DEFAULT_FN_ATTRS -_mm512_sqrt_pd(__m512d a) +_mm512_sqrt_pd(__m512d __a) { - return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)a, + return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)__a, (__v8df) _mm512_setzero_pd (), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); } -static __inline__ __m512 __DEFAULT_FN_ATTRS -_mm512_sqrt_ps(__m512 a) +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A) { - return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)a, + return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_sqrt_round_ps(W, U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)); }) + +#define _mm512_maskz_sqrt_round_ps(U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_sqrt_round_ps(A, R) __extension__ ({ \ + (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)); }) + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_sqrt_ps(__m512 __a) +{ + return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__a, (__v16sf) _mm512_setzero_ps (), (__mmask16) -1, _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A, + (__v16sf) __W, + (__mmask16) __U, + 
_MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_rsqrt14_pd(__m512d __A) { @@ -753,6 +1572,23 @@ _mm512_rsqrt14_pd(__m512d __A) _mm512_setzero_pd (), (__mmask8) -1);} +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_rsqrt14_ps(__m512 __A) { @@ -762,6 +1598,23 @@ _mm512_rsqrt14_ps(__m512 __A) (__mmask16) -1); } +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rsqrt14_ss(__m128 __A, __m128 __B) { @@ -772,6 +1625,24 @@ _mm_rsqrt14_ss(__m128 __A, __m128 __B) (__mmask8) -1); } +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_rsqrt14_sd(__m128d __A, __m128d __B) { @@ -782,6 +1653,24 @@ _mm_rsqrt14_sd(__m128d __A, __m128d __B) (__mmask8) -1); } +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_rcp14_pd(__m512d __A) { @@ -791,6 +1680,23 @@ _mm512_rcp14_pd(__m512d __A) (__mmask8) -1); } +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_rcp14_ps(__m512 __A) { @@ -799,6 +1705,24 @@ _mm512_rcp14_ps(__m512 __A) _mm512_setzero_ps (), (__mmask16) -1); } + 
+static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rcp14_ss(__m128 __A, __m128 __B) { @@ -809,6 +1733,24 @@ _mm_rcp14_ss(__m128 __A, __m128 __B) (__mmask8) -1); } +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_rcp14_sd(__m128d __A, __m128d __B) { @@ -819,6 +1761,24 @@ _mm_rcp14_sd(__m128d __A, __m128d __B) (__mmask8) -1); } +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + static __inline __m512 __DEFAULT_FN_ATTRS _mm512_floor_ps(__m512 __A) { @@ -828,6 +1788,15 @@ _mm512_floor_ps(__m512 __A) _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_FLOOR, + (__v16sf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + static __inline __m512d __DEFAULT_FN_ATTRS _mm512_floor_pd(__m512d __A) { @@ -837,6 +1806,24 @@ _mm512_floor_pd(__m512d __A) _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_FLOOR, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_CEIL, + (__v16sf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + static __inline __m512 __DEFAULT_FN_ATTRS _mm512_ceil_ps(__m512 __A) { @@ -855,6 +1842,15 @@ _mm512_ceil_pd(__m512d __A) _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_CEIL, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_abs_epi64(__m512i __A) { @@ -864,6 +1860,23 @@ _mm512_abs_epi64(__m512i __A) (__mmask8) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +static 
__inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_abs_epi32(__m512i __A) { @@ -873,84 +1886,720 @@ _mm512_abs_epi32(__m512i __A) (__mmask16) -1); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_add_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_add_round_ss(W, U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm_maskz_add_round_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} +#define _mm_add_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_add_round_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_add_round_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_add_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return 
(__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_add_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_add_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_add_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_add_round_pd(A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_add_round_pd(W, U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_add_round_pd(U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_add_round_ps(A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_add_round_ps(W, U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)); }) + +#define _mm512_maskz_add_round_ps(U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} +#define _mm_sub_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_sub_round_ss(W, U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm_maskz_sub_round_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + 
(__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_sub_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_sub_round_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_sub_round_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_sub_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_sub_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_sub_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_sub_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_sub_round_pd(A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_sub_round_pd(W, U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_sub_round_pd(U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_sub_round_ps(A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_sub_round_ps(W, U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)); }); + +#define _mm512_maskz_sub_round_ps(U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }); + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} +#define _mm_mul_round_ss(A, B, R) __extension__ ({ \ + 
(__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_mul_round_ss(W, U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm_maskz_mul_round_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mul_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_mul_round_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_mul_round_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_mul_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_mul_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_mul_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_mul_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_mul_round_pd(A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_mul_round_pd(W, U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_mul_round_pd(U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_mul_round_ps(A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + 
(__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_mul_round_ps(W, U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)); }); + +#define _mm512_maskz_mul_round_ps(U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }); + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_div_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_div_round_ss(W, U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm_maskz_div_round_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_div_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_div_round_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_div_round_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline __m512d __DEFAULT_FN_ATTRS +_mm512_div_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a/(__v8df)__b); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_div_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_div_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS +_mm512_div_ps(__m512 __a, __m512 __b) +{ + return 
(__m512)((__v16sf)__a/(__v16sf)__b); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_div_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_div_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_div_round_pd(A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_div_round_pd(W, U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_div_round_pd(U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_div_round_ps(A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_div_round_ps(W, U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R)); }); + +#define _mm512_maskz_div_round_ps(U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }); + #define _mm512_roundscale_ps(A, B) __extension__ ({ \ - (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(A), (B), (__v16sf)(A), \ - -1, _MM_FROUND_CUR_DIRECTION); }) + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)(__m512)(A), (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_roundscale_ps(A, B, C, imm) __extension__ ({\ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \ + (__v16sf)(__m512)(A), (__mmask16)(B), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_roundscale_ps(A, B, imm) __extension__ ({\ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(A), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) __extension__ ({ \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \ + (__v16sf)(__m512)(A), (__mmask16)(B), \ + (int)(R)); }) + +#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) __extension__ ({ \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(A), (int)(R)); }) + +#define _mm512_roundscale_round_ps(A, imm, R) __extension__ ({ \ + (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)); }) #define _mm512_roundscale_pd(A, B) __extension__ ({ \ - (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(A), (B), (__v8df)(A), \ - -1, _MM_FROUND_CUR_DIRECTION); }) + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)(__m512d)(A), (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + 
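The masked _mm512_mask_* / _mm512_maskz_* arithmetic intrinsics and their *_round_* macro forms added above can be exercised as in the following sketch. It is illustrative only and not part of the diff: main, the variable names, and the -mavx512f build assumption are mine; the intrinsic names themselves come from this header, pulled in through <immintrin.h>.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m512d num = _mm512_set1_pd(1.0);
    __m512d den = _mm512_set1_pd(3.0);

    /* Zero-masking: lanes whose mask bit is clear are forced to 0.0. */
    __m512d q = _mm512_maskz_div_pd(0x0F, num, den);

    /* Explicit rounding control through the *_round_* macro form. */
    __m512d r = _mm512_div_round_pd(num, den,
                                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

    double out[8];
    _mm512_storeu_pd(out, q);
    printf("lane0=%f lane7=%f\n", out[0], out[7]);  /* 0.333333 0.000000 */
    _mm512_storeu_pd(out, r);
    printf("rounded lane0=%f\n", out[0]);
    return 0;
}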
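Likewise, the rewritten _mm512_roundscale_ps / _mm512_roundscale_pd macros and the new masked variants take an 8-bit immediate whose low bits select the rounding mode and whose upper four bits give the number of fraction bits to keep. A hedged sketch, again not part of the diff; the names and the floor-style immediate 0x01 are illustrative assumptions:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m512d vals = _mm512_set1_pd(2.625);

    /* imm8 = 0x01: keep 0 fraction bits, round toward -infinity (floor). */
    __m512d floored = _mm512_roundscale_pd(vals, 0x01);

    /* Masked form: lanes with a clear mask bit keep the pass-through source. */
    __m512d mixed = _mm512_mask_roundscale_pd(vals, 0xAA, vals, 0x01);

    double out[8];
    _mm512_storeu_pd(out, floored);
    printf("%f\n", out[0]);             /* 2.000000 */
    _mm512_storeu_pd(out, mixed);
    printf("%f %f\n", out[0], out[1]);  /* 2.625000 2.000000 */
    return 0;
}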
+#define _mm512_mask_roundscale_pd(A, B, C, imm) __extension__ ({\ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \ + (__v8df)(__m512d)(A), (__mmask8)(B), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_roundscale_pd(A, B, imm) __extension__ ({\ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(A), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \ + (__v8df)(__m512d)(A), (__mmask8)(B), \ + (int)(R)); }) + +#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(A), (int)(R)); }) + +#define _mm512_roundscale_round_pd(A, imm, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) #define _mm512_fmadd_round_pd(A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) -1, (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), (__mmask8)-1, \ + (int)(R)); }) #define _mm512_mask_fmadd_round_pd(A, U, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_fmsub_round_pd(A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \ - (__v8df) (B), -(__v8df) (C), \ - (__mmask8) -1, (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)); }) #define _mm512_mask_fmsub_round_pd(A, U, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) (A), \ - (__v8df) (B), -(__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) (A), \ - (__v8df) (B), -(__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_fnmadd_round_pd(A, B, C, R) __extension__ ({ \ - (__m512d) 
__builtin_ia32_vfmaddpd512_mask (-(__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) -1, (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), (__mmask8)-1, \ + (int)(R)); }) #define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_fnmsub_round_pd(A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) (A), \ - (__v8df) (B), -(__v8df) (C), \ - (__mmask8) -1, (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)); }) #define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) (A), \ - (__v8df) (B), -(__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) static __inline__ __m512d __DEFAULT_FN_ATTRS @@ -1074,75 +2723,87 @@ _mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) } #define _mm512_fmadd_round_ps(A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) -1, (R)); }) + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), (__mmask16)-1, \ + (int)(R)); }) #define _mm512_mask_fmadd_round_ps(A, U, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_fmsub_round_ps(A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \ - (__v16sf) (B), -(__v16sf) (C), \ - (__mmask16) -1, (R)); }) + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)); }) #define _mm512_mask_fmsub_round_ps(A, U, B, C, R) __extension__ ({ \ - (__m512) 
__builtin_ia32_vfmaddps512_mask ((__v16sf) (A), \ - (__v16sf) (B), -(__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) (A), \ - (__v16sf) (B), -(__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_fnmadd_round_ps(A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) -1, (R)); }) + (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), (__mmask16)-1, \ + (int)(R)); }) #define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_fnmsub_round_ps(A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) (A), \ - (__v16sf) (B), -(__v16sf) (C), \ - (__mmask16) -1, (R)); }) + (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)); }) #define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) (A), \ - (__v16sf) (B), -(__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) static __inline__ __m512 __DEFAULT_FN_ATTRS @@ -1266,45 +2927,52 @@ _mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) } #define _mm512_fmaddsub_round_pd(A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) -1, (R)); }) + (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)); }) #define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_maskz_fmaddsub_round_pd(U, A, 
B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_fmsubadd_round_pd(A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \ - (__v8df) (B), -(__v8df) (C), \ - (__mmask8) -1, (R)); }) + (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R)); }) #define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) (A), \ - (__v8df) (B), -(__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) (A), \ - (__v8df) (B), -(__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) static __inline__ __m512d __DEFAULT_FN_ATTRS @@ -1378,45 +3046,52 @@ _mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) } #define _mm512_fmaddsub_round_ps(A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) -1, (R)); }) + (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)); }) #define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_fmsubadd_round_ps(A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \ - (__v16sf) (B), -(__v16sf) (C), \ - (__mmask16) -1, (R)); }) + (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R)); }) #define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) (A), \ - (__v16sf) (B), -(__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + 
-(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) (A), \ - (__v16sf) (B), -(__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) static __inline__ __m512 __DEFAULT_FN_ATTRS @@ -1490,9 +3165,10 @@ _mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) } #define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) static __inline__ __m512d __DEFAULT_FN_ATTRS @@ -1506,9 +3182,10 @@ _mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) } #define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) static __inline__ __m512 __DEFAULT_FN_ATTRS @@ -1522,9 +3199,10 @@ _mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) } #define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) static __inline__ __m512d __DEFAULT_FN_ATTRS @@ -1538,9 +3216,10 @@ _mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) } #define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) static __inline__ __m512 __DEFAULT_FN_ATTRS @@ -1554,9 +3233,10 @@ _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) } #define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfnmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) static __inline__ __m512d __DEFAULT_FN_ATTRS @@ -1570,9 +3250,10 @@ _mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) } #define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfnmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) static __inline__ __m512 __DEFAULT_FN_ATTRS @@ -1586,15 +3267,17 @@ _mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) } #define _mm512_mask_fnmsub_round_pd(A, U, B, C, 
R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfnmsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) __extension__ ({ \ - (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) (A), \ - (__v8df) (B), (__v8df) (C), \ - (__mmask8) (U), (R)); }) + (__m512d)__builtin_ia32_vfnmsubpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R)); }) static __inline__ __m512d __DEFAULT_FN_ATTRS @@ -1618,15 +3301,17 @@ _mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) } #define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfnmsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) __extension__ ({ \ - (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) (A), \ - (__v16sf) (B), (__v16sf) (C), \ - (__mmask16) (U), (R)); }) + (__m512)__builtin_ia32_vfnmsubps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R)); }) static __inline__ __m512 __DEFAULT_FN_ATTRS @@ -1662,6 +3347,29 @@ _mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B) (__v16si) __B, (__mmask16) -1); } + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutex2var_epi32 (__m512i __A, __mmask16 __U, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I + /* idx */ , + (__v16si) __A, + (__v16si) __B, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutex2var_epi32 (__mmask16 __U, __m512i __A, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2vard512_maskz ((__v16si) __I + /* idx */ , + (__v16si) __A, + (__v16si) __B, + (__mmask16) __U); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B) { @@ -1672,125 +3380,282 @@ _mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B) (__mmask8) -1); } -static __inline __m512d __DEFAULT_FN_ATTRS -_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B) +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutex2var_epi64 (__m512i __A, __mmask8 __U, __m512i __I, + __m512i __B) { - return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I - /* idx */ , - (__v8df) __A, - (__v8df) __B, - (__mmask8) -1); -} -static __inline __m512 __DEFAULT_FN_ATTRS -_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B) -{ - return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I + return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I /* idx */ , - (__v16sf) __A, - (__v16sf) __B, - (__mmask16) -1); + (__v8di) __A, + (__v8di) __B, + (__mmask8) __U); +} + + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutex2var_epi64 (__mmask8 __U, __m512i __A, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varq512_maskz ((__v8di) __I + /* idx */ , + (__v8di) __A, + (__v8di) __B, + (__mmask8) __U); } #define _mm512_alignr_epi64(A, B, I) __extension__ ({ \ - 
(__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \ - (__v8di)(__m512i)(B), \ - (I), (__v8di)_mm512_setzero_si512(), \ - (__mmask8)-1); }) + (__m512i)__builtin_shufflevector((__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(A), \ + ((int)(I) & 0x7) + 0, \ + ((int)(I) & 0x7) + 1, \ + ((int)(I) & 0x7) + 2, \ + ((int)(I) & 0x7) + 3, \ + ((int)(I) & 0x7) + 4, \ + ((int)(I) & 0x7) + 5, \ + ((int)(I) & 0x7) + 6, \ + ((int)(I) & 0x7) + 7); }) + +#define _mm512_mask_alignr_epi64(W, U, A, B, imm) __extension__({\ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \ + (__v8di)(__m512i)(W)); }) + +#define _mm512_maskz_alignr_epi64(U, A, B, imm) __extension__({\ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512()); }) #define _mm512_alignr_epi32(A, B, I) __extension__ ({ \ - (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \ - (__v16si)(__m512i)(B), \ - (I), (__v16si)_mm512_setzero_si512(), \ - (__mmask16)-1); }) + (__m512i)__builtin_shufflevector((__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(A), \ + ((int)(I) & 0xf) + 0, \ + ((int)(I) & 0xf) + 1, \ + ((int)(I) & 0xf) + 2, \ + ((int)(I) & 0xf) + 3, \ + ((int)(I) & 0xf) + 4, \ + ((int)(I) & 0xf) + 5, \ + ((int)(I) & 0xf) + 6, \ + ((int)(I) & 0xf) + 7, \ + ((int)(I) & 0xf) + 8, \ + ((int)(I) & 0xf) + 9, \ + ((int)(I) & 0xf) + 10, \ + ((int)(I) & 0xf) + 11, \ + ((int)(I) & 0xf) + 12, \ + ((int)(I) & 0xf) + 13, \ + ((int)(I) & 0xf) + 14, \ + ((int)(I) & 0xf) + 15); }) +#define _mm512_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({\ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \ + (__v16si)(__m512i)(W)); }) + +#define _mm512_maskz_alignr_epi32(U, A, B, imm) __extension__({\ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512()); }) /* Vector Extract */ -#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \ - __m512d __A = (A); \ - (__m256d) \ - __builtin_ia32_extractf64x4_mask((__v8df)__A, \ - (I), \ - (__v4df)_mm256_setzero_si256(), \ - (__mmask8) -1); }) +#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \ + (__m256d)__builtin_shufflevector((__v8df)(__m512d)(A), \ + (__v8df)_mm512_undefined_pd(), \ + ((I) & 1) ? 4 : 0, \ + ((I) & 1) ? 5 : 1, \ + ((I) & 1) ? 6 : 2, \ + ((I) & 1) ? 
7 : 3); }) -#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \ - __m512 __A = (A); \ - (__m128) \ - __builtin_ia32_extractf32x4_mask((__v16sf)__A, \ - (I), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8) -1); }) +#define _mm512_mask_extractf64x4_pd(W, U, A, imm) __extension__ ({\ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm512_extractf64x4_pd((A), (imm)), \ + (__v4df)(W)); }) + +#define _mm512_maskz_extractf64x4_pd(U, A, imm) __extension__ ({\ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm512_extractf64x4_pd((A), (imm)), \ + (__v4df)_mm256_setzero_pd()); }) + +#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \ + (__m128)__builtin_shufflevector((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + 0 + ((I) & 0x3) * 4, \ + 1 + ((I) & 0x3) * 4, \ + 2 + ((I) & 0x3) * 4, \ + 3 + ((I) & 0x3) * 4); }) + +#define _mm512_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({\ + (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm512_extractf32x4_ps((A), (imm)), \ + (__v4sf)(W)); }) + +#define _mm512_maskz_extractf32x4_ps(U, A, imm) __extension__ ({\ + (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm512_extractf32x4_ps((A), (imm)), \ + (__v4sf)_mm_setzero_ps()); }) /* Vector Blend */ static __inline __m512d __DEFAULT_FN_ATTRS _mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W) { - return (__m512d) __builtin_ia32_blendmpd_512_mask ((__v8df) __A, + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, (__v8df) __W, - (__mmask8) __U); + (__v8df) __A); } static __inline __m512 __DEFAULT_FN_ATTRS _mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W) { - return (__m512) __builtin_ia32_blendmps_512_mask ((__v16sf) __A, + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, (__v16sf) __W, - (__mmask16) __U); + (__v16sf) __A); } static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W) { - return (__m512i) __builtin_ia32_blendmq_512_mask ((__v8di) __A, + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, (__v8di) __W, - (__mmask8) __U); + (__v8di) __A); } static __inline __m512i __DEFAULT_FN_ATTRS _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W) { - return (__m512i) __builtin_ia32_blendmd_512_mask ((__v16si) __A, + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, (__v16si) __W, - (__mmask16) __U); + (__v16si) __A); } /* Compare */ #define _mm512_cmp_round_ps_mask(A, B, P, R) __extension__ ({ \ (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(B), \ - (P), (__mmask16)-1, (R)); }) + (__v16sf)(__m512)(B), (int)(P), \ + (__mmask16)-1, (int)(R)); }) #define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) __extension__ ({ \ (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(B), \ - (P), (__mmask16)(U), (R)); }) + (__v16sf)(__m512)(B), (int)(P), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_cmp_ps_mask(A, B, P) \ _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) - #define _mm512_mask_cmp_ps_mask(U, A, B, P) \ _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) +#define _mm512_cmpeq_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_EQ_OQ) +#define _mm512_mask_cmpeq_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_EQ_OQ) + +#define _mm512_cmplt_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_LT_OS) +#define _mm512_mask_cmplt_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), 
(B), _CMP_LT_OS) + +#define _mm512_cmple_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_LE_OS) +#define _mm512_mask_cmple_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LE_OS) + +#define _mm512_cmpunord_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_UNORD_Q) +#define _mm512_mask_cmpunord_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_UNORD_Q) + +#define _mm512_cmpneq_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NEQ_UQ) +#define _mm512_mask_cmpneq_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NEQ_UQ) + +#define _mm512_cmpnlt_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NLT_US) +#define _mm512_mask_cmpnlt_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLT_US) + +#define _mm512_cmpnle_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NLE_US) +#define _mm512_mask_cmpnle_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLE_US) + +#define _mm512_cmpord_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_ORD_Q) +#define _mm512_mask_cmpord_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q) + #define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \ (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(B), \ - (P), (__mmask8)-1, (R)); }) + (__v8df)(__m512d)(B), (int)(P), \ + (__mmask8)-1, (int)(R)); }) #define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) __extension__ ({ \ (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(B), \ - (P), (__mmask8)(U), (R)); }) + (__v8df)(__m512d)(B), (int)(P), \ + (__mmask8)(U), (int)(R)); }) #define _mm512_cmp_pd_mask(A, B, P) \ _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) - #define _mm512_mask_cmp_pd_mask(U, A, B, P) \ _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) +#define _mm512_cmpeq_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_EQ_OQ) +#define _mm512_mask_cmpeq_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_EQ_OQ) + +#define _mm512_cmplt_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_LT_OS) +#define _mm512_mask_cmplt_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LT_OS) + +#define _mm512_cmple_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_LE_OS) +#define _mm512_mask_cmple_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LE_OS) + +#define _mm512_cmpunord_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_UNORD_Q) +#define _mm512_mask_cmpunord_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_UNORD_Q) + +#define _mm512_cmpneq_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NEQ_UQ) +#define _mm512_mask_cmpneq_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NEQ_UQ) + +#define _mm512_cmpnlt_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NLT_US) +#define _mm512_mask_cmpnlt_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLT_US) + +#define _mm512_cmpnle_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NLE_US) +#define _mm512_mask_cmpnle_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLE_US) + +#define _mm512_cmpord_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_ORD_Q) +#define _mm512_mask_cmpord_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_ORD_Q) + /* Conversion */ +#define _mm512_cvtt_roundps_epu32(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_undefined_epi32(), \ + (__mmask16)-1, 
(int)(R)); }) + +#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R)); }) + + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_cvttps_epu32(__m512 __A) { @@ -1801,43 +3666,281 @@ _mm512_cvttps_epu32(__m512 __A) _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + #define _mm512_cvt_roundepi32_ps(A, R) __extension__ ({ \ - (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(A), \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (R)); }) + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) #define _mm512_cvt_roundepu32_ps(A, R) __extension__ ({ \ - (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(A), \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (R)); }) + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_cvtepu32_ps (__m512i __A) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} static __inline __m512d __DEFAULT_FN_ATTRS _mm512_cvtepi32_pd(__m256i __A) { - return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A, - (__v8df) - _mm512_setzero_pd (), - (__mmask8) -1); + return (__m512d)__builtin_convertvector((__v8si)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi32_pd (__m512d __W, 
__mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepi32_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepi32_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_cvtepi32lo_pd(__m512i __A) +{ + return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U,__m512i __A) +{ + return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A)); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_cvtepi32_ps (__m512i __A) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); } static __inline __m512d __DEFAULT_FN_ATTRS _mm512_cvtepu32_pd(__m256i __A) { - return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A, - (__v8df) - _mm512_setzero_pd (), - (__mmask8) -1); + return (__m512d)__builtin_convertvector((__v8su)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepu32_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepu32_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_cvtepu32lo_pd(__m512i __A) +{ + return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A) +{ + return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A)); } #define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \ - (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(A), \ + (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ (__v8sf)_mm256_setzero_ps(), \ - (__mmask8)-1, (R)); }) + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_cvt_roundpd_ps(U, A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_cvtpd_ps (__m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) _mm256_undefined_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, 
__m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) _mm256_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_cvtpd_pslo (__m512d __A) +{ + return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A), + (__v8sf) _mm256_setzero_ps (), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A) +{ + return (__m512) __builtin_shufflevector ( + (__v8sf) _mm512_mask_cvtpd_ps (_mm512_castps512_ps256(__W), + __U, __A), + (__v8sf) _mm256_setzero_ps (), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +#define _mm512_cvt_roundps_ph(A, I) __extension__ ({ \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)_mm256_undefined_si256(), \ + (__mmask16)-1); }) + +#define _mm512_mask_cvt_roundps_ph(U, W, A, I) __extension__ ({ \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)(__m256i)(U), \ + (__mmask16)(W)); }) + +#define _mm512_maskz_cvt_roundps_ph(W, A, I) __extension__ ({ \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)_mm256_setzero_si256(), \ + (__mmask16)(W)); }) #define _mm512_cvtps_ph(A, I) __extension__ ({ \ - (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(A), (I), \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ (__v16hi)_mm256_setzero_si256(), \ - -1); }) + (__mmask16)-1); }) + +#define _mm512_mask_cvtps_ph(U, W, A, I) __extension__ ({ \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)(__m256i)(U), \ + (__mmask16)(W)); }) + +#define _mm512_maskz_cvtps_ph(W, A, I) __extension__ ({\ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)_mm256_setzero_si256(), \ + (__mmask16)(W)); }) + +#define _mm512_cvt_roundph_ps(A, R) __extension__ ({ \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundph_ps(W, U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundph_ps(U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + static __inline __m512 __DEFAULT_FN_ATTRS _mm512_cvtph_ps(__m256i __A) @@ -1849,85 +3952,479 @@ _mm512_cvtph_ps(__m256i __A) _MM_FROUND_CUR_DIRECTION); } -static __inline __m512i __DEFAULT_FN_ATTRS -_mm512_cvttps_epi32(__m512 a) +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A) { - return (__m512i) - __builtin_ia32_cvttps2dq512_mask((__v16sf) a, - (__v16si) _mm512_setzero_si512 (), - (__mmask16) -1, _MM_FROUND_CUR_DIRECTION); + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); } -static __inline __m256i __DEFAULT_FN_ATTRS -_mm512_cvttpd_epi32(__m512d a) +static __inline__ __m512 __DEFAULT_FN_ATTRS 
+_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A) { - return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) a, + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline __m256i __DEFAULT_FN_ATTRS +_mm512_cvttpd_epi32(__m512d __a) +{ + return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a, (__v8si)_mm256_setzero_si256(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); } -#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \ - (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(A), \ - (__v8si)_mm256_setzero_si256(), \ - (__mmask8)-1, (R)); }) +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} #define _mm512_cvtt_roundps_epi32(A, R) __extension__ ({ \ - (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(A), \ + (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ (__v16si)_mm512_setzero_si512(), \ - (__mmask16)-1, (R)); }) + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R)); }) + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_cvttps_epi32(__m512 __a) +{ + return (__m512i) + __builtin_ia32_cvttps2dq512_mask((__v16sf) __a, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} #define _mm512_cvt_roundps_epi32(A, R) __extension__ ({ \ - (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(A), \ + (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ (__v16si)_mm512_setzero_si512(), \ - (__mmask16)-1, (R)); }) + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) 
__extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundps_epi32(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtps_epi32 (__m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) _mm512_undefined_epi32 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} #define _mm512_cvt_roundpd_epi32(A, R) __extension__ ({ \ - (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(A), \ + (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ (__v8si)_mm256_setzero_si256(), \ - (__mmask8)-1, (R)); }) + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtpd_epi32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} #define _mm512_cvt_roundps_epu32(A, R) __extension__ ({ \ - (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(A), \ + (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ (__v16si)_mm512_setzero_si512(), \ - (__mmask16)-1, (R)); }) + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundps_epu32(U, A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtps_epu32 ( __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,\ + (__v16si)\ + _mm512_undefined_epi32 (),\ + (__mmask16) -1,\ + _MM_FROUND_CUR_DIRECTION);\ +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 
__U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtps_epu32 ( __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U , + _MM_FROUND_CUR_DIRECTION); +} #define _mm512_cvt_roundpd_epu32(A, R) __extension__ ({ \ - (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(A), \ + (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ (__v8si)_mm256_setzero_si256(), \ - (__mmask8) -1, (R)); }) + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtpd_epu32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} /* Unpack and Interleave */ + static __inline __m512d __DEFAULT_FN_ATTRS _mm512_unpackhi_pd(__m512d __a, __m512d __b) { - return __builtin_shufflevector(__a, __b, 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6); + return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b, + 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpackhi_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpackhi_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); } static __inline __m512d __DEFAULT_FN_ATTRS _mm512_unpacklo_pd(__m512d __a, __m512d __b) { - return __builtin_shufflevector(__a, __b, 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6); + return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b, + 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpacklo_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpacklo_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); } static __inline __m512 __DEFAULT_FN_ATTRS _mm512_unpackhi_ps(__m512 __a, __m512 __b) { - return 
__builtin_shufflevector(__a, __b, - 2, 18, 3, 19, - 2+4, 18+4, 3+4, 19+4, - 2+8, 18+8, 3+8, 19+8, - 2+12, 18+12, 3+12, 19+12); + return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b, + 2, 18, 3, 19, + 2+4, 18+4, 3+4, 19+4, + 2+8, 18+8, 3+8, 19+8, + 2+12, 18+12, 3+12, 19+12); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpackhi_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpackhi_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); } static __inline __m512 __DEFAULT_FN_ATTRS _mm512_unpacklo_ps(__m512 __a, __m512 __b) { - return __builtin_shufflevector(__a, __b, - 0, 16, 1, 17, - 0+4, 16+4, 1+4, 17+4, - 0+8, 16+8, 1+8, 17+8, - 0+12, 16+12, 1+12, 17+12); + return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b, + 0, 16, 1, 17, + 0+4, 16+4, 1+4, 17+4, + 0+8, 16+8, 1+8, 17+8, + 0+12, 16+12, 1+12, 17+12); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpacklo_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpacklo_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_unpackhi_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B, + 2, 18, 3, 19, + 2+4, 18+4, 3+4, 19+4, + 2+8, 18+8, 3+8, 19+8, + 2+12, 18+12, 3+12, 19+12); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpackhi_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpackhi_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_unpacklo_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B, + 0, 16, 1, 17, + 0+4, 16+4, 1+4, 17+4, + 0+8, 16+8, 1+8, 17+8, + 0+12, 16+12, 1+12, 17+12); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpacklo_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpacklo_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_unpackhi_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B, + 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS 
+_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpackhi_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpackhi_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_unpacklo_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B, + 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpacklo_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpacklo_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); } /* Bit Test */ @@ -1940,6 +4437,13 @@ _mm512_test_epi32_mask(__m512i __A, __m512i __B) (__mmask16) -1); } +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A, + (__v16si) __B, __U); +} + static __inline __mmask8 __DEFAULT_FN_ATTRS _mm512_test_epi64_mask(__m512i __A, __m512i __B) { @@ -1948,57 +4452,88 @@ _mm512_test_epi64_mask(__m512i __A, __m512i __B) (__mmask8) -1); } +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A, (__v8di) __B, __U); +} + + /* SIMD load ops */ +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_loadu_si512 (void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1); +} + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P, + (__v16si) __W, + (__mmask16) __U); +} + + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P) { - return (__m512i) __builtin_ia32_loaddqusi512_mask ((const __v16si *)__P, + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P, (__v16si) _mm512_setzero_si512 (), (__mmask16) __U); } +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P, + (__v8di) __W, + (__mmask8) __U); +} + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P) { - return (__m512i) __builtin_ia32_loaddqudi512_mask ((const __v8di *)__P, + return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P, (__v8di) _mm512_setzero_si512 (), (__mmask8) __U); } +static __inline __m512 __DEFAULT_FN_ATTRS +_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + static __inline __m512 __DEFAULT_FN_ATTRS _mm512_maskz_loadu_ps(__mmask16 __U, void const *__P) { - 
return (__m512) __builtin_ia32_loadups512_mask ((const __v16sf *)__P, + return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P, (__v16sf) _mm512_setzero_ps (), (__mmask16) __U); } +static __inline __m512d __DEFAULT_FN_ATTRS +_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P, + (__v8df) __W, + (__mmask8) __U); +} + static __inline __m512d __DEFAULT_FN_ATTRS _mm512_maskz_loadu_pd(__mmask8 __U, void const *__P) { - return (__m512d) __builtin_ia32_loadupd512_mask ((const __v8df *)__P, - (__v8df) - _mm512_setzero_pd (), - (__mmask8) __U); -} - -static __inline __m512 __DEFAULT_FN_ATTRS -_mm512_maskz_load_ps(__mmask16 __U, void const *__P) -{ - return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P, - (__v16sf) - _mm512_setzero_ps (), - (__mmask16) __U); -} - -static __inline __m512d __DEFAULT_FN_ATTRS -_mm512_maskz_load_pd(__mmask8 __U, void const *__P) -{ - return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P, + return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P, (__v8df) _mm512_setzero_pd (), (__mmask8) __U); @@ -2023,7 +4558,7 @@ _mm512_loadu_ps(float const *__p) } static __inline __m512 __DEFAULT_FN_ATTRS -_mm512_load_ps(double const *__p) +_mm512_load_ps(float const *__p) { return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__p, (__v16sf) @@ -2031,8 +4566,25 @@ _mm512_load_ps(double const *__p) (__mmask16) -1); } +static __inline __m512 __DEFAULT_FN_ATTRS +_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_load_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + static __inline __m512d __DEFAULT_FN_ATTRS -_mm512_load_pd(float const *__p) +_mm512_load_pd(double const *__p) { return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__p, (__v8df) @@ -2040,45 +4592,87 @@ _mm512_load_pd(float const *__p) (__mmask8) -1); } +static __inline __m512d __DEFAULT_FN_ATTRS +_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_load_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_load_si512 (void const *__P) +{ + return *(__m512i *) __P; +} + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_load_epi32 (void const *__P) +{ + return *(__m512i *) __P; +} + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_load_epi64 (void const *__P) +{ + return *(__m512i *) __P; +} + /* SIMD store ops */ static __inline void __DEFAULT_FN_ATTRS _mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A) { - __builtin_ia32_storedqudi512_mask ((__v8di *)__P, (__v8di) __A, + __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A, (__mmask8) __U); } +static __inline void __DEFAULT_FN_ATTRS +_mm512_storeu_si512 (void *__P, __m512i __A) +{ + __builtin_ia32_storedqusi512_mask ((int *) __P, (__v16si) __A, + (__mmask16) -1); +} + static __inline void __DEFAULT_FN_ATTRS 
_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A) { - __builtin_ia32_storedqusi512_mask ((__v16si *)__P, (__v16si) __A, + __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A, (__mmask16) __U); } static __inline void __DEFAULT_FN_ATTRS _mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A) { - __builtin_ia32_storeupd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U); + __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U); } static __inline void __DEFAULT_FN_ATTRS _mm512_storeu_pd(void *__P, __m512d __A) { - __builtin_ia32_storeupd512_mask((__v8df *)__P, (__v8df)__A, (__mmask8)-1); + __builtin_ia32_storeupd512_mask((double *)__P, (__v8df)__A, (__mmask8)-1); } static __inline void __DEFAULT_FN_ATTRS _mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A) { - __builtin_ia32_storeups512_mask ((__v16sf *)__P, (__v16sf) __A, + __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A, (__mmask16) __U); } static __inline void __DEFAULT_FN_ATTRS _mm512_storeu_ps(void *__P, __m512 __A) { - __builtin_ia32_storeups512_mask((__v16sf *)__P, (__v16sf)__A, (__mmask16)-1); + __builtin_ia32_storeups512_mask((float *)__P, (__v16sf)__A, (__mmask16)-1); } static __inline void __DEFAULT_FN_ATTRS @@ -2106,6 +4700,24 @@ _mm512_store_ps(void *__P, __m512 __A) *(__m512*)__P = __A; } +static __inline void __DEFAULT_FN_ATTRS +_mm512_store_si512 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS +_mm512_store_epi32 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS +_mm512_store_epi64 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + /* Mask ops */ static __inline __mmask16 __DEFAULT_FN_ATTRS @@ -2404,54 +5016,5229 @@ _mm512_mask_cmpneq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) { __u); } +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepi8_epi32(__m128i __A) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi8_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi8_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepi8_epi64(__m128i __A) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. 
*/ + return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__A, (__v16qs)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi8_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi8_epi64(__A), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepi32_epi64(__m256i __X) +{ + return (__m512i)__builtin_convertvector((__v8si)__X, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi32_epi64(__X), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi32_epi64(__X), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepi16_epi32(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi16_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi16_epi32(__A), + (__v16si)_mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepi16_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi16_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi16_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepu8_epi32(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu8_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu8_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepu8_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__A, (__v16qu)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + 
(__v8di)_mm512_cvtepu8_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu8_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepu32_epi64(__m256i __X) +{ + return (__m512i)__builtin_convertvector((__v8su)__X, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu32_epi64(__X), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu32_epi64(__X), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepu16_epi32(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu16_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu16_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_cvtepu16_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu16_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu16_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_rorv_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_rorv_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B) 
+{ + return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + + + #define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \ - __m512i __a = (a); \ - __m512i __b = (b); \ - (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, (p), \ + (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ (__mmask16)-1); }) #define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \ - __m512i __a = (a); \ - __m512i __b = (b); \ - (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, (p), \ + (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ (__mmask16)-1); }) #define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \ - __m512i __a = (a); \ - __m512i __b = (b); \ - (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, (p), \ + (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ (__mmask8)-1); }) #define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \ - __m512i __a = (a); \ - __m512i __b = (b); \ - (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, (p), \ + (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ (__mmask8)-1); }) #define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \ - __m512i __a = (a); \ - __m512i __b = (b); \ - (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, (p), \ + (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ (__mmask16)(m)); }) #define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \ - __m512i __a = (a); \ - __m512i __b = (b); \ - (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, (p), \ + (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ (__mmask16)(m)); }) #define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \ - __m512i __a = (a); \ - __m512i __b = (b); \ - (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, (p), \ + (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ (__mmask8)(m)); }) #define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \ - __m512i __a = (a); \ - __m512i __b = (b); \ - (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, (p), \ + (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ (__mmask8)(m)); }) +#define _mm512_rol_epi32(a, b) __extension__ ({ \ + (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1); }) + +#define _mm512_mask_rol_epi32(W, U, a, b) __extension__ ({ \ + (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U)); }) + +#define _mm512_maskz_rol_epi32(U, a, b) __extension__ ({ \ + (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U)); }) + +#define _mm512_rol_epi64(a, b) __extension__ ({ \ + (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1); }) + +#define _mm512_mask_rol_epi64(W, U, a, b) __extension__ ({ \ + (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \ + (__v8di)(__m512i)(W), (__mmask8)(U)); }) + +#define _mm512_maskz_rol_epi64(U, a, 
b) __extension__ ({ \ + (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U)); }) +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_rolv_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_rolv_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#define _mm512_ror_epi32(A, B) __extension__ ({ \ + (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1); }) + +#define _mm512_mask_ror_epi32(W, U, A, B) __extension__ ({ \ + (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U)); }) + +#define _mm512_maskz_ror_epi32(U, A, B) __extension__ ({ \ + (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U)); }) + +#define _mm512_ror_epi64(A, B) __extension__ ({ \ + (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1); }) + +#define _mm512_mask_ror_epi64(W, U, A, B) __extension__ ({ \ + (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \ + (__v8di)(__m512i)(W), (__mmask8)(U)); }) + +#define _mm512_maskz_ror_epi64(U, A, B) __extension__ ({ \ + (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U)); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_slli_epi32(__m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_slli_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_slli_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_slli_epi64(__m512i __A, int __B) +{ + return 
(__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_slli_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_slli_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srli_epi32(__m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srli_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srli_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srli_epi64(__m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srli_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srli_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, + (__v16si) __A, + (__v16si) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, + (__v16si) __A, + (__v16si) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, + (__v8di) __A, + (__v8di) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, + (__v8di) __A, + (__v8di) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + 
return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_movedup_pd (__m512d __A) +{ + return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A, + 0, 0, 2, 2, 4, 4, 6, 6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_movedup_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_movedup_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_fixupimm_round_pd(A, B, C, imm, R) __extension__ ({ \ + (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) __extension__ ({ \ + (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_fixupimm_pd(A, B, C, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) __extension__ ({ \ + (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), \ + (int)(imm), (__mmask8)(U), \ + (int)(R)); }) + +#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), \ + (int)(imm), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_fixupimm_round_ps(A, B, C, imm, R) __extension__ ({ \ + (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) __extension__ ({ \ + (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_fixupimm_ps(A, B, C, imm) __extension__ ({ \ + (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \ + 
(__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) __extension__ ({ \ + (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), \ + (int)(imm), (__mmask16)(U), \ + (int)(R)); }) + +#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \ + (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), \ + (int)(imm), (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_fixupimm_round_sd(A, B, C, imm, R) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_fixupimm_sd(A, B, C, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_fixupimm_sd(A, U, B, C, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_fixupimm_round_ss(A, B, C, imm, R) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_fixupimm_ss(A, B, C, imm) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_fixupimm_ss(A, U, B, C, imm) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + 
_MM_FROUND_CUR_DIRECTION); }) + +#define _mm_getexp_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_getexp_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A, + (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_getexp_round_sd(W, U, A, B, R) __extension__ ({\ + (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_getexp_round_sd(U, A, B, R) __extension__ ({\ + (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_getexp_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_getexp_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, (__v4sf) _mm_setzero_ps(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_getexp_round_ss(W, U, A, B, R) __extension__ ({\ + (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_getexp_round_ss(U, A, B, R) __extension__ ({\ + (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_getmant_round_sd(A, B, C, D, R) __extension__ ({ \ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_getmant_sd(A, B, C, D) __extension__ ({ \ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_getmant_sd(W, U, A, B, C, D) __extension__ ({\ 
+ (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R)({\ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_getmant_sd(U, A, B, C, D) __extension__ ({\ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) __extension__ ({\ + (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_getmant_round_ss(A, B, C, D, R) __extension__ ({ \ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_getmant_ss(A, B, C, D) __extension__ ({ \ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_getmant_ss(W, U, A, B, C, D) __extension__ ({\ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R)({\ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_getmant_ss(U, A, B, C, D) __extension__ ({\ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) __extension__ ({\ + (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kmov (__mmask16 __A) +{ + return __A; +} + +#define _mm_comi_round_sd(A, B, P, R) __extension__ ({\ + (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \ + (int)(P), (int)(R)); }) + +#define _mm_comi_round_ss(A, B, P, R) __extension__ ({\ + (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \ + (int)(P), (int)(R)); }) + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \ + (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); }) +#endif + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I, + __mmask16 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermi2vard512_mask ((__v16si) __A, + (__v16si) __I + /* idx */ , + (__v16si) __B, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sll_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B); +} + +static __inline__ 
__m512i __DEFAULT_FN_ATTRS +_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sll_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sll_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sll_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sll_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sll_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sllv_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sllv_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sllv_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sllv_epi64(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sllv_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sllv_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sra_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sra_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sra_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_sra_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sra_epi64(__A, __B), + 
(__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sra_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srav_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srav_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srav_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srav_epi64(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srav_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srav_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srl_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srl_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srl_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srl_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srl_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srl_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srlv_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srlv_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + 
(__v16si)_mm512_srlv_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srlv_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srlv_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srlv_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)-1); }) + +#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)(U)); }) + +#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(C), \ + (int)(imm), (__mmask16)(U)); }) + +#define _mm512_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_i64(A, R) __extension__ ({ \ + (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); }) +#endif + +#define _mm_cvt_roundsd_si32(A, R) __extension__ ({ \ + (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); }) + +#define _mm_cvt_roundsd_i32(A, R) __extension__ ({ \ + (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); }) + +#define _mm_cvt_roundsd_u32(A, R) __extension__ ({ \ + (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)); }) + +static __inline__ unsigned __DEFAULT_FN_ATTRS +_mm_cvtsd_u32 (__m128d __A) +{ + return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_u64(A, R) __extension__ ({ \ + (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \ + (int)(R)); }) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm_cvtsd_u64 (__m128d __A) +{ + return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvt_roundss_si32(A, R) __extension__ ({ \ + (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); }) + +#define _mm_cvt_roundss_i32(A, R) __extension__ ({ \ + (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); }) + +#ifdef __x86_64__ +#define _mm_cvt_roundss_si64(A, R) __extension__ ({ \ + 
(long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); }) + +#define _mm_cvt_roundss_i64(A, R) __extension__ ({ \ + (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); }) +#endif + +#define _mm_cvt_roundss_u32(A, R) __extension__ ({ \ + (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)); }) + +static __inline__ unsigned __DEFAULT_FN_ATTRS +_mm_cvtss_u32 (__m128 __A) +{ + return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundss_u64(A, R) __extension__ ({ \ + (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \ + (int)(R)); }) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm_cvtss_u64 (__m128 __A) +{ + return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundsd_i32(A, R) __extension__ ({ \ + (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); }) + +#define _mm_cvtt_roundsd_si32(A, R) __extension__ ({ \ + (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); }) + +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvttsd_i32 (__m128d __A) +{ + return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundsd_si64(A, R) __extension__ ({ \ + (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); }) + +#define _mm_cvtt_roundsd_i64(A, R) __extension__ ({ \ + (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); }) + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvttsd_i64 (__m128d __A) +{ + return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundsd_u32(A, R) __extension__ ({ \ + (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)); }) + +static __inline__ unsigned __DEFAULT_FN_ATTRS +_mm_cvttsd_u32 (__m128d __A) +{ + return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundsd_u64(A, R) __extension__ ({ \ + (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \ + (int)(R)); }) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm_cvttsd_u64 (__m128d __A) +{ + return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundss_i32(A, R) __extension__ ({ \ + (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); }) + +#define _mm_cvtt_roundss_si32(A, R) __extension__ ({ \ + (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); }) + +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvttss_i32 (__m128 __A) +{ + return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundss_i64(A, R) __extension__ ({ \ + (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); }) + +#define _mm_cvtt_roundss_si64(A, R) __extension__ ({ \ + (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); }) + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvttss_i64 (__m128 __A) +{ + return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundss_u32(A, R) __extension__ ({ \ + (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)); }) + +static __inline__ unsigned 
__DEFAULT_FN_ATTRS +_mm_cvttss_u32 (__m128 __A) +{ + return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundss_u64(A, R) __extension__ ({ \ + (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \ + (int)(R)); }) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm_cvttss_u64 (__m128 __A) +{ + return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask2_permutex2var_pd (__m512d __A, __m512i __I, __mmask8 __U, + __m512d __B) +{ + return (__m512d) __builtin_ia32_vpermi2varpd512_mask ((__v8df) __A, + (__v8di) __I + /* idx */ , + (__v8df) __B, + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask2_permutex2var_ps (__m512 __A, __m512i __I, __mmask16 __U, + __m512 __B) +{ + return (__m512) __builtin_ia32_vpermi2varps512_mask ((__v16sf) __A, + (__v16si) __I + /* idx */ , + (__v16sf) __B, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask2_permutex2var_epi64 (__m512i __A, __m512i __I, + __mmask8 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermi2varq512_mask ((__v8di) __A, + (__v8di) __I + /* idx */ , + (__v8di) __B, + (__mmask8) __U); +} + +#define _mm512_permute_pd(X, C) __extension__ ({ \ + (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \ + (__v8df)_mm512_undefined_pd(), \ + 0 + (((C) >> 0) & 0x1), \ + 0 + (((C) >> 1) & 0x1), \ + 2 + (((C) >> 2) & 0x1), \ + 2 + (((C) >> 3) & 0x1), \ + 4 + (((C) >> 4) & 0x1), \ + 4 + (((C) >> 5) & 0x1), \ + 6 + (((C) >> 6) & 0x1), \ + 6 + (((C) >> 7) & 0x1)); }) + +#define _mm512_mask_permute_pd(W, U, X, C) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permute_pd((X), (C)), \ + (__v8df)(__m512d)(W)); }) + +#define _mm512_maskz_permute_pd(U, X, C) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permute_pd((X), (C)), \ + (__v8df)_mm512_setzero_pd()); }) + +#define _mm512_permute_ps(X, C) __extension__ ({ \ + (__m512)__builtin_shufflevector((__v16sf)(__m512)(X), \ + (__v16sf)_mm512_undefined_ps(), \ + 0 + (((C) >> 0) & 0x3), \ + 0 + (((C) >> 2) & 0x3), \ + 0 + (((C) >> 4) & 0x3), \ + 0 + (((C) >> 6) & 0x3), \ + 4 + (((C) >> 0) & 0x3), \ + 4 + (((C) >> 2) & 0x3), \ + 4 + (((C) >> 4) & 0x3), \ + 4 + (((C) >> 6) & 0x3), \ + 8 + (((C) >> 0) & 0x3), \ + 8 + (((C) >> 2) & 0x3), \ + 8 + (((C) >> 4) & 0x3), \ + 8 + (((C) >> 6) & 0x3), \ + 12 + (((C) >> 0) & 0x3), \ + 12 + (((C) >> 2) & 0x3), \ + 12 + (((C) >> 4) & 0x3), \ + 12 + (((C) >> 6) & 0x3)); }) + +#define _mm512_mask_permute_ps(W, U, X, C) __extension__ ({ \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_permute_ps((X), (C)), \ + (__v16sf)(__m512)(W)); }) + +#define _mm512_maskz_permute_ps(U, X, C) __extension__ ({ \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_permute_ps((X), (C)), \ + (__v16sf)_mm512_setzero_ps()); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_permutevar_pd(__m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutevar_pd(__A, __C), + (__v8df)__W); +} + +static __inline__ __m512d 
__DEFAULT_FN_ATTRS +_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutevar_pd(__A, __C), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_permutevar_ps(__m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutevar_ps(__A, __C), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutevar_ps(__A, __C), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS +_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B) +{ + return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I + /* idx */ , + (__v8df) __A, + (__v8df) __B, + (__mmask8) -1); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_permutex2var_pd (__m512d __A, __mmask8 __U, __m512i __I, __m512d __B) +{ + return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I + /* idx */ , + (__v8df) __A, + (__v8df) __B, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_permutex2var_pd (__mmask8 __U, __m512d __A, __m512i __I, + __m512d __B) +{ + return (__m512d) __builtin_ia32_vpermt2varpd512_maskz ((__v8di) __I + /* idx */ , + (__v8df) __A, + (__v8df) __B, + (__mmask8) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS +_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B) +{ + return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I + /* idx */ , + (__v16sf) __A, + (__v16sf) __B, + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_permutex2var_ps (__m512 __A, __mmask16 __U, __m512i __I, __m512 __B) +{ + return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I + /* idx */ , + (__v16sf) __A, + (__v16sf) __B, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_permutex2var_ps (__mmask16 __U, __m512 __A, __m512i __I, + __m512 __B) +{ + return (__m512) __builtin_ia32_vpermt2varps512_maskz ((__v16si) __I + /* idx */ , + (__v16sf) __A, + (__v16sf) __B, + (__mmask16) __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_testn_epi32_mask (__m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A, + (__v16si) __B, + (__mmask16) -1); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A, + (__v16si) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm512_testn_epi64_mask (__m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A, + (__v8di) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A, + (__v8di) __B, __U); +} + +#define _mm512_cvtt_roundpd_epu32(A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_undefined_si256(), \ + (__mmask8)-1, (int)(R)); }) + +#define 
_mm512_mask_cvtt_roundpd_epu32(W, U, A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvttpd_epu32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_roundscale_round_sd(A, B, imm, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(imm), \ + (int)(R)); }) + +#define _mm_roundscale_sd(A, B, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(imm), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_roundscale_sd(W, U, A, B, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(imm), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(I), \ + (int)(R)); }) + +#define _mm_maskz_roundscale_sd(U, A, B, I) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(I), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(I), \ + (int)(R)); }) + +#define _mm_roundscale_round_ss(A, B, imm, R) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(imm), \ + (int)(R)); }) + +#define _mm_roundscale_ss(A, B, imm) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(imm), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_roundscale_ss(W, U, A, B, I) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(I), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ 
+ (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(I), \ + (int)(R)); }) + +#define _mm_maskz_roundscale_ss(U, A, B, I) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(I), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(I), \ + (int)(R)); }) + +#define _mm512_scalef_round_pd(A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_scalef_round_pd(W, U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_scalef_round_pd(U, A, B, R) __extension__ ({ \ + (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_scalef_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_scalef_round_ps(A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_scalef_round_ps(W, U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_scalef_round_ps(U, A, B, R) __extension__ ({ \ + (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_scalef_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); 
+} + +#define _mm_scalef_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_scalef_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A, + (__v2df)( __B), (__v2df) _mm_setzero_pd(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_scalef_round_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_scalef_round_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_scalef_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_scalef_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A, + (__v4sf)( __B), (__v4sf) _mm_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_scalef_round_ss(W, U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_scalef_round_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srai_epi32(__m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, \ + (__v16si)_mm512_srai_epi32(__A, __B), \ + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, \ + 
(__v16si)_mm512_srai_epi32(__A, __B), \ + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_srai_epi64(__m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, \ + (__v8di)_mm512_srai_epi64(__A, __B), \ + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, \ + (__v8di)_mm512_srai_epi64(__A, __B), \ + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_shuffle_f32x4(A, B, imm) __extension__ ({ \ + (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1); }) + +#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \ + (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U)); }) + +#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \ + (__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U)); }) + +#define _mm512_shuffle_f64x2(A, B, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1); }) + +#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U)); }) + +#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_shuf_f64x2_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm512_shuffle_i32x4(A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(imm), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1); }) + +#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(imm), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U)); }) + +#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_shuf_i32x4_mask((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(imm), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U)); }) + +#define _mm512_shuffle_i64x2(A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(imm), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1); }) + +#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(imm), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U)); }) + +#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_shuf_i64x2_mask((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(imm), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U)); }) + +#define _mm512_shuffle_pd(A, B, M) __extension__ ({ \ + 
(__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + 0 + (((M) >> 0) & 0x1), \ + 8 + (((M) >> 1) & 0x1), \ + 2 + (((M) >> 2) & 0x1), \ + 10 + (((M) >> 3) & 0x1), \ + 4 + (((M) >> 4) & 0x1), \ + 12 + (((M) >> 5) & 0x1), \ + 6 + (((M) >> 6) & 0x1), \ + 14 + (((M) >> 7) & 0x1)); }) + +#define _mm512_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_pd((A), (B), (M)), \ + (__v8df)(__m512d)(W)); }) + +#define _mm512_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_pd((A), (B), (M)), \ + (__v8df)_mm512_setzero_pd()); }) + +#define _mm512_shuffle_ps(A, B, M) __extension__ ({ \ + (__m512d)__builtin_shufflevector((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + 0 + (((M) >> 0) & 0x3), \ + 0 + (((M) >> 2) & 0x3), \ + 16 + (((M) >> 4) & 0x3), \ + 16 + (((M) >> 6) & 0x3), \ + 4 + (((M) >> 0) & 0x3), \ + 4 + (((M) >> 2) & 0x3), \ + 20 + (((M) >> 4) & 0x3), \ + 20 + (((M) >> 6) & 0x3), \ + 8 + (((M) >> 0) & 0x3), \ + 8 + (((M) >> 2) & 0x3), \ + 24 + (((M) >> 4) & 0x3), \ + 24 + (((M) >> 6) & 0x3), \ + 12 + (((M) >> 0) & 0x3), \ + 12 + (((M) >> 2) & 0x3), \ + 28 + (((M) >> 4) & 0x3), \ + 28 + (((M) >> 6) & 0x3)); }) + +#define _mm512_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \ + (__v16sf)(__m512)(W)); }) + +#define _mm512_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \ + (__v16sf)_mm512_setzero_ps()); }) + +#define _mm_sqrt_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_sqrt_round_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_sqrt_round_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_sqrt_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_sqrt_round_ss(W, U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + 
(__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_sqrt_round_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_broadcast_f32x4 (__m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_f32x4 (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A, + (__v16sf) __O, + __M); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_f32x4 (__mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A, + (__v16sf) + _mm512_setzero_ps (), + __M); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_broadcast_f64x4 (__m256d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_f64x4 (__m512d __O, __mmask8 __M, __m256d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A, + (__v8df) __O, + __M); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_f64x4 (__mmask8 __M, __m256d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A, + (__v8df) + _mm512_setzero_pd (), + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcast_i32x4 (__m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_i32x4 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A, + (__v16si) __O, + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_i32x4 (__mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcast_i64x4 (__m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_broadcast_i64x4 (__m512i __O, __mmask8 __M, __m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A, + (__v8di) __O, + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_broadcast_i64x4 (__mmask8 __M, __m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__M, + (__v8df) _mm512_broadcastsd_pd(__A), + (__v8df) __O); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A) +{ + return 
(__m512d)__builtin_ia32_selectpd_512(__M, + (__v8df) _mm512_broadcastsd_pd(__A), + (__v8df) _mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__M, + (__v16sf) _mm512_broadcastss_ps(__A), + (__v16sf) __O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__M, + (__v16sf) _mm512_broadcastss_ps(__A), + (__v16sf) _mm512_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtsepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtsepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtsepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtsepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static 
__inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtsepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtusepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtusepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtusepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) 
__builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtusepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtusepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS 
+_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_cvtepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_cvtepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M); +} + +#define _mm512_extracti32x4_epi32(A, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector((__v16si)(__m512i)(A), \ + (__v16si)_mm512_undefined_epi32(), \ + 0 + ((imm) & 0x3) * 4, \ + 1 + ((imm) & 0x3) * 4, \ + 2 + ((imm) & 0x3) * 4, \ + 3 + ((imm) & 0x3) * 4); }) + +#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, \ + (__v4si)_mm512_extracti32x4_epi32((A), (imm)), \ + (__v4si)__W); }) + +#define _mm512_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, \ + (__v4si)_mm512_extracti32x4_epi32((A), (imm)), \ + (__v4si)_mm_setzero_si128()); }) + +#define _mm512_extracti64x4_epi64(A, imm) __extension__ ({ \ + (__m256i)__builtin_shufflevector((__v8di)(__m512i)(A), \ + (__v8di)_mm512_undefined_epi32(), \ + ((imm) & 1) ? 4 : 0, \ + ((imm) & 1) ? 
5 : 1, \ + ((imm) & 1) ? 6 : 2, \ + ((imm) & 1) ? 7 : 3); }) + +#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm512_extracti64x4_epi64((A), (imm)), \ + (__v4di)__W); }) + +#define _mm512_maskz_extracti64x4_epi64(U, A, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm512_extracti64x4_epi64((A), (imm)), \ + (__v4di)_mm256_setzero_si256()); }) + +#define _mm512_insertf64x4(A, B, imm) __extension__ ({ \ + (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \ + (__v8df)_mm512_castpd256_pd512((__m256d)(B)), \ + ((imm) & 0x1) ? 0 : 8, \ + ((imm) & 0x1) ? 1 : 9, \ + ((imm) & 0x1) ? 2 : 10, \ + ((imm) & 0x1) ? 3 : 11, \ + ((imm) & 0x1) ? 8 : 4, \ + ((imm) & 0x1) ? 9 : 5, \ + ((imm) & 0x1) ? 10 : 6, \ + ((imm) & 0x1) ? 11 : 7); }) + +#define _mm512_mask_insertf64x4(W, U, A, B, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x4((A), (B), (imm)), \ + (__v8df)(W)); }) + +#define _mm512_maskz_insertf64x4(U, A, B, imm) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x4((A), (B), (imm)), \ + (__v8df)_mm512_setzero_pd()); }) + +#define _mm512_inserti64x4(A, B, imm) __extension__ ({ \ + (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \ + (__v8di)_mm512_castsi256_si512((__m256i)(B)), \ + ((imm) & 0x1) ? 0 : 8, \ + ((imm) & 0x1) ? 1 : 9, \ + ((imm) & 0x1) ? 2 : 10, \ + ((imm) & 0x1) ? 3 : 11, \ + ((imm) & 0x1) ? 8 : 4, \ + ((imm) & 0x1) ? 9 : 5, \ + ((imm) & 0x1) ? 10 : 6, \ + ((imm) & 0x1) ? 11 : 7); }) + +#define _mm512_mask_inserti64x4(W, U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x4((A), (B), (imm)), \ + (__v8di)(W)); }) + +#define _mm512_maskz_inserti64x4(U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x4((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512()); }) + +#define _mm512_insertf32x4(A, B, imm) __extension__ ({ \ + (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_castps128_ps512((__m128)(B)),\ + (((imm) & 0x3) == 0) ? 16 : 0, \ + (((imm) & 0x3) == 0) ? 17 : 1, \ + (((imm) & 0x3) == 0) ? 18 : 2, \ + (((imm) & 0x3) == 0) ? 19 : 3, \ + (((imm) & 0x3) == 1) ? 16 : 4, \ + (((imm) & 0x3) == 1) ? 17 : 5, \ + (((imm) & 0x3) == 1) ? 18 : 6, \ + (((imm) & 0x3) == 1) ? 19 : 7, \ + (((imm) & 0x3) == 2) ? 16 : 8, \ + (((imm) & 0x3) == 2) ? 17 : 9, \ + (((imm) & 0x3) == 2) ? 18 : 10, \ + (((imm) & 0x3) == 2) ? 19 : 11, \ + (((imm) & 0x3) == 3) ? 16 : 12, \ + (((imm) & 0x3) == 3) ? 17 : 13, \ + (((imm) & 0x3) == 3) ? 18 : 14, \ + (((imm) & 0x3) == 3) ? 19 : 15); }) + +#define _mm512_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \ + (__v16sf)(W)); }) + +#define _mm512_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \ + (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \ + (__v16sf)_mm512_setzero_ps()); }) + +#define _mm512_inserti32x4(A, B, imm) __extension__ ({ \ + (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \ + (__v16si)_mm512_castsi128_si512((__m128i)(B)),\ + (((imm) & 0x3) == 0) ? 16 : 0, \ + (((imm) & 0x3) == 0) ? 17 : 1, \ + (((imm) & 0x3) == 0) ? 18 : 2, \ + (((imm) & 0x3) == 0) ? 19 : 3, \ + (((imm) & 0x3) == 1) ? 
16 : 4, \ + (((imm) & 0x3) == 1) ? 17 : 5, \ + (((imm) & 0x3) == 1) ? 18 : 6, \ + (((imm) & 0x3) == 1) ? 19 : 7, \ + (((imm) & 0x3) == 2) ? 16 : 8, \ + (((imm) & 0x3) == 2) ? 17 : 9, \ + (((imm) & 0x3) == 2) ? 18 : 10, \ + (((imm) & 0x3) == 2) ? 19 : 11, \ + (((imm) & 0x3) == 3) ? 16 : 12, \ + (((imm) & 0x3) == 3) ? 17 : 13, \ + (((imm) & 0x3) == 3) ? 18 : 14, \ + (((imm) & 0x3) == 3) ? 19 : 15); }) + +#define _mm512_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x4((A), (B), (imm)), \ + (__v16si)(W)); }) + +#define _mm512_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x4((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512()); }) + +#define _mm512_getmant_round_pd(A, B, C, R) __extension__ ({ \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) __extension__ ({ \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) __extension__ ({ \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_getmant_pd(A, B, C) __extension__ ({ \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_getmant_pd(U, A, B, C) __extension__ ({ \ + (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_getmant_round_ps(A, B, C, R) __extension__ ({ \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) __extension__ ({ \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) __extension__ ({ \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_getmant_ps(A, B, C) __extension__ ({ \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_maskz_getmant_ps(U, A, B, C) __extension__ ({ \ + (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + 
(__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_getexp_round_pd(A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_getexp_round_pd(W, U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_getexp_round_pd(U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_getexp_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_getexp_round_ps(A, R) __extension__ ({ \ + (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R)); }) + +#define _mm512_mask_getexp_round_ps(W, U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R)); }) + +#define _mm512_maskz_getexp_round_ps(U, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R)); }) + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_getexp_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_i64gather_ps(index, addr, scale) __extension__ ({ \ + (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \ + (float const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale)); }) + +#define _mm512_mask_i64gather_ps( __v1_old, __mask, __index,\ + __addr, __scale) __extension__({\ +__builtin_ia32_gatherdiv16sf ((__v8sf) __v1_old,\ + __addr,(__v8di) __index, __mask, __scale);\ +}) + +#define _mm512_i64gather_epi32(index, addr, scale) __extension__ ({\ + (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_ps(), \ + (int const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)-1, (int)(scale)); }) + +#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \ + (int 
const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm512_i64gather_pd(index, addr, scale) __extension__ ({\ + (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \ + (double const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale)); }) + +#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \ + (double const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm512_i64gather_epi64(index, addr, scale) __extension__ ({\ + (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_pd(), \ + (long long const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale)); }) + +#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \ + (long long const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm512_i32gather_ps(index, addr, scale) __extension__ ({\ + (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \ + (float const *)(addr), \ + (__v16sf)(__m512)(index), \ + (__mmask16)-1, (int)(scale)); }) + +#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \ + (float const *)(addr), \ + (__v16sf)(__m512)(index), \ + (__mmask16)(mask), (int)(scale)); }) + +#define _mm512_i32gather_epi32(index, addr, scale) __extension__ ({\ + (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \ + (int const *)(addr), \ + (__v16si)(__m512i)(index), \ + (__mmask16)-1, (int)(scale)); }) + +#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \ + (int const *)(addr), \ + (__v16si)(__m512i)(index), \ + (__mmask16)(mask), (int)(scale)); }) + +#define _mm512_i32gather_pd(index, addr, scale) __extension__ ({\ + (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \ + (double const *)(addr), \ + (__v8si)(__m256i)(index), (__mmask8)-1, \ + (int)(scale)); }) + +#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \ + (double const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm512_i32gather_epi64(index, addr, scale) __extension__ ({\ + (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \ + (long long const *)(addr), \ + (__v8si)(__m256i)(index), (__mmask8)-1, \ + (int)(scale)); }) + +#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \ + (long long const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm512_i64scatter_ps(addr, index, v1, scale) __extension__ ({\ + __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8sf)(__m256)(v1), (int)(scale)); }) + +#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({\ + __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8sf)(__m256)(v1), (int)(scale)); }) + +#define _mm512_i64scatter_epi32(addr, index, v1, scale) __extension__ ({\ + 
__builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)); }) + +#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\ + __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)); }) + +#define _mm512_i64scatter_pd(addr, index, v1, scale) __extension__ ({\ + __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)); }) + +#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({\ + __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)); }) + +#define _mm512_i64scatter_epi64(addr, index, v1, scale) __extension__ ({\ + __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)); }) + +#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\ + __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)); }) + +#define _mm512_i32scatter_ps(addr, index, v1, scale) __extension__ ({\ + __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)-1, \ + (__v16si)(__m512i)(index), \ + (__v16sf)(__m512)(v1), (int)(scale)); }) + +#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({\ + __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)(mask), \ + (__v16si)(__m512i)(index), \ + (__v16sf)(__m512)(v1), (int)(scale)); }) + +#define _mm512_i32scatter_epi32(addr, index, v1, scale) __extension__ ({\ + __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)-1, \ + (__v16si)(__m512i)(index), \ + (__v16si)(__m512i)(v1), (int)(scale)); }) + +#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\ + __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)(mask), \ + (__v16si)(__m512i)(index), \ + (__v16si)(__m512i)(v1), (int)(scale)); }) + +#define _mm512_i32scatter_pd(addr, index, v1, scale) __extension__ ({\ + __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)); }) + +#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({\ + __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)); }) + +#define _mm512_i32scatter_epi64(addr, index, v1, scale) __extension__ ({\ + __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)); }) + +#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\ + __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fmadd_round_ss(W, U, A, B, R) __extension__({\ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS 
+_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W, + (__v4sf) __X, + (__v4sf) __Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + (__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fmsub_round_ss(W, U, A, B, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W, + (__v4sf) __X, + (__v4sf) __Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W, + (__v4sf) __X, + (__v4sf) 
__Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) __extension__({\ + (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + -(__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) __extension__ ({\ + (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfnmsubss3_mask3 ((__v4sf) __W, + (__v4sf) __X, + (__v4sf) __Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) __extension__({\ + (__m128)__builtin_ia32_vfnmsubss3_mask3((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fmadd_round_sd(W, U, A, B, R) __extension__({\ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) __extension__ ({\ + (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W, + (__v2df) __X, + (__v2df) __Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) __extension__ ({\ + (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W, + (__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fmsub_round_sd(W, U, A, B, R) 
__extension__ ({\ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A, + (__v2df) __B, + -(__v2df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) __extension__ ({\ + (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W, + (__v2df) __X, + (__v2df) __Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) __extension__ ({\ + (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) __extension__ ({\ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) __extension__ ({\ + (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) __W, + (__v2df) __X, + (__v2df) __Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) __extension__({\ + (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W, + -(__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) __extension__ ({\ + (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A, + (__v2df) __B, + -(__v2df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) __extension__ ({\ + (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \ + 
(__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfnmsubsd3_mask3 ((__v2df) (__W), + (__v2df) __X, + (__v2df) (__Y), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) __extension__({\ + (__m128d)__builtin_ia32_vfnmsubsd3_mask3((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_permutex_pd(X, C) __extension__ ({ \ + (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \ + (__v8df)_mm512_undefined_pd(), \ + 0 + (((C) >> 0) & 0x3), \ + 0 + (((C) >> 2) & 0x3), \ + 0 + (((C) >> 4) & 0x3), \ + 0 + (((C) >> 6) & 0x3), \ + 4 + (((C) >> 0) & 0x3), \ + 4 + (((C) >> 2) & 0x3), \ + 4 + (((C) >> 4) & 0x3), \ + 4 + (((C) >> 6) & 0x3)); }) + +#define _mm512_mask_permutex_pd(W, U, X, C) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permutex_pd((X), (C)), \ + (__v8df)(__m512d)(W)); }) + +#define _mm512_maskz_permutex_pd(U, X, C) __extension__ ({ \ + (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permutex_pd((X), (C)), \ + (__v8df)_mm512_setzero_pd()); }) + +#define _mm512_permutex_epi64(X, C) __extension__ ({ \ + (__m512i)__builtin_shufflevector((__v8di)(__m512i)(X), \ + (__v8di)_mm512_undefined_epi32(), \ + 0 + (((C) >> 0) & 0x3), \ + 0 + (((C) >> 2) & 0x3), \ + 0 + (((C) >> 4) & 0x3), \ + 0 + (((C) >> 6) & 0x3), \ + 4 + (((C) >> 0) & 0x3), \ + 4 + (((C) >> 2) & 0x3), \ + 4 + (((C) >> 4) & 0x3), \ + 4 + (((C) >> 6) & 0x3)); }) + +#define _mm512_mask_permutex_epi64(W, U, X, C) __extension__ ({ \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_permutex_epi64((X), (C)), \ + (__v8di)(__m512i)(W)); }) + +#define _mm512_maskz_permutex_epi64(U, X, C) __extension__ ({ \ + (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_permutex_epi64((X), (C)), \ + (__v8di)_mm512_setzero_si512()); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_permutexvar_pd (__m512i __X, __m512d __Y) +{ + return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y, + (__v8di) __X, + (__v8df) _mm512_undefined_pd (), + (__mmask8) -1); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y) +{ + return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y, + (__v8di) __X, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y) +{ + return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y, + (__v8di) __X, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y, + (__v8di) __X, + (__v8di) _mm512_setzero_si512 (), + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y, + (__v8di) __X, + (__v8di) _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X, + __m512i __Y) +{ + return 
(__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y, + (__v8di) __X, + (__v8di) __W, + __M); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_permutexvar_ps (__m512i __X, __m512 __Y) +{ + return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y, + (__v16si) __X, + (__v16sf) _mm512_undefined_ps (), + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y) +{ + return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y, + (__v16si) __X, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y) +{ + return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y, + (__v16si) __X, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y, + (__v16si) __X, + (__v16si) _mm512_setzero_si512 (), + __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y, + (__v16si) __X, + (__v16si) _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y, + (__v16si) __X, + (__v16si) __W, + __M); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kand (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kandn (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_kortestc (__mmask16 __A, __mmask16 __B) +{ + return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_kortestz (__mmask16 __A, __mmask16 __B) +{ + return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kunpackb (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kxnor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kxor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_stream_si512 (__m512i * __P, __m512i __A) +{ + __builtin_nontemporal_store((__v8di)__A, (__v8di*)__P); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_stream_load_si512 (void *__P) +{ + return __builtin_ia32_movntdqa512 ((__v8di *)__P); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_stream_pd (double *__P, __m512d __A) +{ + __builtin_nontemporal_store((__v8df)__A, (__v8df*)__P); +} + +static __inline__ void __DEFAULT_FN_ATTRS 
+_mm512_stream_ps (float *__P, __m512 __A) +{ + __builtin_nontemporal_store((__v16sf)__A, (__v16sf*)__P); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +#define _mm_cmp_round_ss_mask(X, Y, P, R) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)(M), (int)(R)); }) + +#define _mm_cmp_ss_mask(X, Y, P) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_cmp_ss_mask(M, X, Y, P) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)(M), \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_cmp_round_sd_mask(X, Y, P, R) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)(M), (int)(R)); }) + +#define _mm_cmp_sd_mask(X, Y, P) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION); }) + +#define _mm_mask_cmp_sd_mask(M, X, Y, P) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)(M), \ + _MM_FROUND_CUR_DIRECTION); 
}) + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_movehdup_ps (__m512 __A) +{ + return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A, + 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_movehdup_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_movehdup_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_moveldup_ps (__m512 __A) +{ + return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A, + 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_moveldup_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_moveldup_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + __m128 res = __A; + res[0] = (__U & 1) ? __B[0] : __W[0]; + return res; +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + __m128 res = __A; + res[0] = (__U & 1) ? __B[0] : 0; + return res; +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + __m128d res = __A; + res[0] = (__U & 1) ? __B[0] : __W[0]; + return res; +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + __m128d res = __A; + res[0] = (__U & 1) ? 
__B[0] : 0; + return res; +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storess128_mask ((__v16sf *)__W, + (__v16sf) _mm512_castps128_ps512(__A), + (__mmask16) __U & (__mmask16)1); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storesd128_mask ((__v8df *)__W, + (__v8df) _mm512_castpd128_pd512(__A), + (__mmask8) __U & 1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A) +{ + __m128 src = (__v4sf) __builtin_shufflevector((__v4sf) __W, + (__v4sf) {0.0, 0.0, 0.0, 0.0}, + 0, 4, 4, 4); + + return (__m128) __builtin_shufflevector( + __builtin_ia32_loadss128_mask ((__v16sf *) __A, + (__v16sf) _mm512_castps128_ps512(src), + (__mmask16) __U & 1), + _mm512_undefined_ps(), 0, 1, 2, 3); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_load_ss (__mmask8 __U, const float* __A) +{ + return (__m128) __builtin_shufflevector( + __builtin_ia32_loadss128_mask ((__v16sf *) __A, + (__v16sf) _mm512_setzero_ps(), + (__mmask16) __U & 1), + _mm512_undefined_ps(), 0, 1, 2, 3); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A) +{ + __m128d src = (__v2df) __builtin_shufflevector((__v2df) __W, + (__v2df) {0.0, 0.0}, 0, 2); + + return (__m128d) __builtin_shufflevector( + __builtin_ia32_loadsd128_mask ((__v8df *) __A, + (__v8df) _mm512_castpd128_pd512(src), + (__mmask8) __U & 1), + _mm512_undefined_pd(), 0, 1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_load_sd (__mmask8 __U, const double* __A) +{ + return (__m128d) __builtin_shufflevector( + __builtin_ia32_loadsd128_mask ((__v8df *) __A, + (__v8df) _mm512_setzero_pd(), + (__mmask8) __U & 1), + _mm512_undefined_pd(), 0, 1); +} + +#define _mm512_shuffle_epi32(A, I) __extension__ ({ \ + (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \ + (__v16si)_mm512_undefined_epi32(), \ + 0 + (((I) >> 0) & 0x3), \ + 0 + (((I) >> 2) & 0x3), \ + 0 + (((I) >> 4) & 0x3), \ + 0 + (((I) >> 6) & 0x3), \ + 4 + (((I) >> 0) & 0x3), \ + 4 + (((I) >> 2) & 0x3), \ + 4 + (((I) >> 4) & 0x3), \ + 4 + (((I) >> 6) & 0x3), \ + 8 + (((I) >> 0) & 0x3), \ + 8 + (((I) >> 2) & 0x3), \ + 8 + (((I) >> 4) & 0x3), \ + 8 + (((I) >> 6) & 0x3), \ + 12 + (((I) >> 0) & 0x3), \ + 12 + (((I) >> 2) & 0x3), \ + 12 + (((I) >> 4) & 0x3), \ + 12 + (((I) >> 6) & 0x3)); }) + +#define _mm512_mask_shuffle_epi32(W, U, A, I) __extension__ ({ \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_epi32((A), (I)), \ + (__v16si)(__m512i)(W)); }) + +#define _mm512_maskz_shuffle_epi32(U, A, I) __extension__ ({ \ + (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_epi32((A), (I)), \ + (__v16si)_mm512_setzero_si512()); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A, + (__v8di) __W, + 
(__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P, + (__v8df) _mm512_setzero_pd(), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P, + (__v8di) _mm512_setzero_pd(), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P, + (__v16sf) _mm512_setzero_ps(), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P, + (__v16si) _mm512_setzero_ps(), + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps(), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_ps(), + (__mmask16) __U); +} + +#define _mm512_cvt_roundps_pd(A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm512_mask_cvt_roundps_pd(W, U, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm512_maskz_cvt_roundps_pd(U, A, R) __extension__ ({ \ + 
(__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_cvtps_pd (__m256 __A) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_cvtpslo_pd (__m512 __A) +{ + return (__m512) _mm512_cvtps_pd(_mm512_castps512_ps256(__A)); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A) +{ + return (__m512) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, + (__v8df) __A, + (__v8df) __W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, + (__v8df) __A, + (__v8df) _mm512_setzero_pd ()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, + (__v16sf) __A, + (__v16sf) __W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, + (__v16sf) __A, + (__v16sf) _mm512_setzero_ps ()); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +#define _mm_cvt_roundsd_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)_mm_undefined_ps(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)_mm_setzero_ps(), \ + 
(__mmask8)(U), (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B) +{ + return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A), + (__v2df)(__B), + (__v4sf)(__W), + (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B) +{ + return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A), + (__v2df)(__B), + (__v4sf)_mm_setzero_ps(), + (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvtss_i32 _mm_cvtss_si32 +#define _mm_cvtsd_i32 _mm_cvtsd_si32 +#define _mm_cvti32_sd _mm_cvtsi32_sd +#define _mm_cvti32_ss _mm_cvtsi32_ss +#ifdef __x86_64__ +#define _mm_cvtss_i64 _mm_cvtss_si64 +#define _mm_cvtsd_i64 _mm_cvtsd_si64 +#define _mm_cvti64_sd _mm_cvtsi64_sd +#define _mm_cvti64_ss _mm_cvtsi64_ss +#endif + +#ifdef __x86_64__ +#define _mm_cvt_roundi64_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \ + (int)(R)); }) + +#define _mm_cvt_roundsi64_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \ + (int)(R)); }) +#endif + +#define _mm_cvt_roundsi32_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); }) + +#define _mm_cvt_roundi32_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); }) + +#ifdef __x86_64__ +#define _mm_cvt_roundsi64_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \ + (int)(R)); }) + +#define _mm_cvt_roundi64_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \ + (int)(R)); }) +#endif + +#define _mm_cvt_roundss_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)_mm_undefined_pd(), \ + (__mmask8)-1, (int)(R)); }) + +#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R)); }) + +#define _mm_maskz_cvt_roundss_sd(U, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B) +{ + return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A), + (__v4sf)(__B), + (__v2df)(__W), + (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B) +{ + return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A), + (__v4sf)(__B), + (__v2df)_mm_setzero_pd(), + (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtu32_sd (__m128d __A, unsigned __B) +{ + return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundu64_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \ + (unsigned long long)(B), (int)(R)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtu64_sd (__m128d __A, unsigned long long __B) +{ + return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B, + 
_MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvt_roundu32_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \ + (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtu32_ss (__m128 __A, unsigned __B) +{ + return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundu64_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \ + (unsigned long long)(B), (int)(R)); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtu64_ss (__m128 __A, unsigned long long __B) +{ + return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A) +{ + return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A, (__v16si) __O, + __M); +} + +#ifdef __x86_64__ +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A) +{ + return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A, (__v8di) __O, + __M); +} +#endif + +static __inline __m512i __DEFAULT_FN_ATTRS +_mm512_set_epi32 (int __A, int __B, int __C, int __D, + int __E, int __F, int __G, int __H, + int __I, int __J, int __K, int __L, + int __M, int __N, int __O, int __P) +{ + return __extension__ (__m512i)(__v16si) + { __P, __O, __N, __M, __L, __K, __J, __I, + __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7, \ + e8,e9,e10,e11,e12,e13,e14,e15) \ + _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \ + (e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_set_epi64 (long long __A, long long __B, long long __C, + long long __D, long long __E, long long __F, + long long __G, long long __H) +{ + return __extension__ (__m512i) (__v8di) + { __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7) \ + _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_set_pd (double __A, double __B, double __C, double __D, + double __E, double __F, double __G, double __H) +{ + return __extension__ (__m512d) + { __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7) \ + _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_set_ps (float __A, float __B, float __C, float __D, + float __E, float __F, float __G, float __H, + float __I, float __J, float __K, float __L, + float __M, float __N, float __O, float __P) +{ + return __extension__ (__m512) + { __P, __O, __N, __M, __L, __K, __J, __I, + __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \ + _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \ + (e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_abs_ps(__m512 __A) +{ + return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ; +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS +_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A) +{ + return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ; +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_abs_pd(__m512d __A) +{ + return 
(__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A) ; +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS +_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A) +{ + return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A); +} + +// Vector-reduction arithmetic accepts vectors as inputs and produces scalars as +// outputs. This class of vector operation forms the basis of many scientific +// computations. In vector-reduction arithmetic, the evaluation off is +// independent of the order of the input elements of V. + +// Used bisection method. At each step, we partition the vector with previous +// step in half, and the operation is performed on its two halves. +// This takes log2(n) steps where n is the number of elements in the vector. + +// Vec512 - Vector with size 512. +// Operator - Can be one of following: +,*,&,| +// T2 - Can get 'i' for int and 'f' for float. +// T1 - Can get 'i' for int and 'd' for double. + +#define _mm512_reduce_operator_64bit(Vec512, Operator, T2, T1) \ + __extension__({ \ + __m256##T1 Vec256 = __builtin_shufflevector( \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512, \ + 0, 1, 2, 3) \ + Operator \ + __builtin_shufflevector( \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512, \ + 4, 5, 6, 7); \ + __m128##T1 Vec128 = __builtin_shufflevector( \ + (__v4d##T2)Vec256, \ + (__v4d##T2)Vec256, \ + 0, 1) \ + Operator \ + __builtin_shufflevector( \ + (__v4d##T2)Vec256, \ + (__v4d##T2)Vec256, \ + 2, 3); \ + Vec128 = __builtin_shufflevector((__v2d##T2)Vec128, \ + (__v2d##T2)Vec128, 0, -1) \ + Operator \ + __builtin_shufflevector((__v2d##T2)Vec128, \ + (__v2d##T2)Vec128, 1, -1); \ + return Vec128[0]; \ + }) + +static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_add_epi64(__m512i __W) { + _mm512_reduce_operator_64bit(__W, +, i, i); +} + +static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_mul_epi64(__m512i __W) { + _mm512_reduce_operator_64bit(__W, *, i, i); +} + +static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_and_epi64(__m512i __W) { + _mm512_reduce_operator_64bit(__W, &, i, i); +} + +static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_or_epi64(__m512i __W) { + _mm512_reduce_operator_64bit(__W, |, i, i); +} + +static __inline__ double __DEFAULT_FN_ATTRS _mm512_reduce_add_pd(__m512d __W) { + _mm512_reduce_operator_64bit(__W, +, f, d); +} + +static __inline__ double __DEFAULT_FN_ATTRS _mm512_reduce_mul_pd(__m512d __W) { + _mm512_reduce_operator_64bit(__W, *, f, d); +} + +// Vec512 - Vector with size 512. +// Vec512Neutral - All vector elements set to the identity element. +// Identity element: {+,0},{*,1},{&,0xFFFFFFFFFFFFFFFF},{|,0} +// Operator - Can be one of following: +,*,&,| +// Mask - Intrinsic Mask +// T2 - Can get 'i' for int and 'f' for float. +// T1 - Can get 'i' for int and 'd' for packed double-precision. +// T3 - Can be Pd for packed double or q for q-word. 
+ +#define _mm512_mask_reduce_operator_64bit(Vec512, Vec512Neutral, Operator, \ + Mask, T2, T1, T3) \ + __extension__({ \ + Vec512 = __builtin_ia32_select##T3##_512( \ + (__mmask8)Mask, \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512Neutral); \ + _mm512_reduce_operator_64bit(Vec512, Operator, T2, T1); \ + }) + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) { + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0), +, __M, i, i, q); +} + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) { + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(1), *, __M, i, i, q); +} + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) { + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF), + &, __M, i, i, q); +} + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) { + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0), |, __M, + i, i, q); +} + +static __inline__ double __DEFAULT_FN_ATTRS +_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) { + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_pd(0), +, __M, + f, d, pd); +} + +static __inline__ double __DEFAULT_FN_ATTRS +_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) { + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_pd(1), *, __M, + f, d, pd); +} + +// Vec512 - Vector with size 512. +// Operator - Can be one of following: +,*,&,| +// T2 - Can get 'i' for int and ' ' for packed single. +// T1 - Can get 'i' for int and 'f' for float. + +#define _mm512_reduce_operator_32bit(Vec512, Operator, T2, T1) __extension__({ \ + __m256##T1 Vec256 = \ + (__m256##T1)(__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 0, 1, 2, 3, 4, 5, 6, 7) \ + Operator \ + __builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 8, 9, 10, 11, 12, 13, 14, 15)); \ + __m128##T1 Vec128 = \ + (__m128##T1)(__builtin_shufflevector( \ + (__v8s##T2)Vec256, \ + (__v8s##T2)Vec256, \ + 0, 1, 2, 3) \ + Operator \ + __builtin_shufflevector( \ + (__v8s##T2)Vec256, \ + (__v8s##T2)Vec256, \ + 4, 5, 6, 7)); \ + Vec128 = (__m128##T1)(__builtin_shufflevector( \ + (__v4s##T2)Vec128, \ + (__v4s##T2)Vec128, \ + 0, 1, -1, -1) \ + Operator \ + __builtin_shufflevector( \ + (__v4s##T2)Vec128, \ + (__v4s##T2)Vec128, \ + 2, 3, -1, -1)); \ + Vec128 = (__m128##T1)(__builtin_shufflevector( \ + (__v4s##T2)Vec128, \ + (__v4s##T2)Vec128, \ + 0, -1, -1, -1) \ + Operator \ + __builtin_shufflevector( \ + (__v4s##T2)Vec128, \ + (__v4s##T2)Vec128, \ + 1, -1, -1, -1)); \ + return Vec128[0]; \ + }) + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_reduce_add_epi32(__m512i __W) { + _mm512_reduce_operator_32bit(__W, +, i, i); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_reduce_mul_epi32(__m512i __W) { + _mm512_reduce_operator_32bit(__W, *, i, i); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_reduce_and_epi32(__m512i __W) { + _mm512_reduce_operator_32bit(__W, &, i, i); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_reduce_or_epi32(__m512i __W) { + _mm512_reduce_operator_32bit(__W, |, i, i); +} + +static __inline__ float __DEFAULT_FN_ATTRS +_mm512_reduce_add_ps(__m512 __W) { + _mm512_reduce_operator_32bit(__W, +, f, ); +} + +static __inline__ float __DEFAULT_FN_ATTRS +_mm512_reduce_mul_ps(__m512 __W) { + _mm512_reduce_operator_32bit(__W, *, f, ); +} + +// Vec512 - Vector with size 512. 
+// Vec512Neutral - All vector elements set to the identity element. +// Identity element: {+,0},{*,1},{&,0xFFFFFFFF},{|,0} +// Operator - Can be one of following: +,*,&,| +// Mask - Intrinsic Mask +// T2 - Can get 'i' for int and 'f' for float. +// T1 - Can get 'i' for int and 'd' for double. +// T3 - Can be Ps for packed single or d for d-word. + +#define _mm512_mask_reduce_operator_32bit(Vec512, Vec512Neutral, Operator, \ + Mask, T2, T1, T3) \ + __extension__({ \ + Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \ + (__mmask16)Mask, \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512Neutral); \ + _mm512_reduce_operator_32bit(Vec512, Operator, T2, T1); \ + }) + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) { + _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0), +, __M, i, i, d); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) { + _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(1), *, __M, i, i, d); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) { + _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0xFFFFFFFF), &, __M, + i, i, d); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) { + _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0), |, __M, i, i, d); +} + +static __inline__ float __DEFAULT_FN_ATTRS +_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) { + _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_ps(0), +, __M, f, , ps); +} + +static __inline__ float __DEFAULT_FN_ATTRS +_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) { + _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_ps(1), *, __M, f, , ps); +} + +// Used bisection method. At each step, we partition the vector with previous +// step in half, and the operation is performed on its two halves. +// This takes log2(n) steps where n is the number of elements in the vector. +// This macro uses only intrinsics from the AVX512F feature. + +// Vec512 - Vector with size of 512. +// IntrinName - Can be one of following: {max|min}_{epi64|epu64|pd} for example: +// __mm512_max_epi64 +// T1 - Can get 'i' for int and 'd' for double.[__m512{i|d}] +// T2 - Can get 'i' for int and 'f' for float. 
[__v8d{i|f}] + +#define _mm512_reduce_maxMin_64bit(Vec512, IntrinName, T1, T2) __extension__({ \ + Vec512 = _mm512_##IntrinName( \ + (__m512##T1)__builtin_shufflevector( \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512, \ + 0, 1, 2, 3, -1, -1, -1, -1), \ + (__m512##T1)__builtin_shufflevector( \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512, \ + 4, 5, 6, 7, -1, -1, -1, -1)); \ + Vec512 = _mm512_##IntrinName( \ + (__m512##T1)__builtin_shufflevector( \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512, \ + 0, 1, -1, -1, -1, -1, -1, -1),\ + (__m512##T1)__builtin_shufflevector( \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512, \ + 2, 3, -1, -1, -1, -1, -1, \ + -1)); \ + Vec512 = _mm512_##IntrinName( \ + (__m512##T1)__builtin_shufflevector( \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512, \ + 0, -1, -1, -1, -1, -1, -1, -1),\ + (__m512##T1)__builtin_shufflevector( \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512, \ + 1, -1, -1, -1, -1, -1, -1, -1))\ + ; \ + return Vec512[0]; \ + }) + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm512_reduce_max_epi64(__m512i __V) { + _mm512_reduce_maxMin_64bit(__V, max_epi64, i, i); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm512_reduce_max_epu64(__m512i __V) { + _mm512_reduce_maxMin_64bit(__V, max_epu64, i, i); +} + +static __inline__ double __DEFAULT_FN_ATTRS +_mm512_reduce_max_pd(__m512d __V) { + _mm512_reduce_maxMin_64bit(__V, max_pd, d, f); +} + +static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_min_epi64 +(__m512i __V) { + _mm512_reduce_maxMin_64bit(__V, min_epi64, i, i); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm512_reduce_min_epu64(__m512i __V) { + _mm512_reduce_maxMin_64bit(__V, min_epu64, i, i); +} + +static __inline__ double __DEFAULT_FN_ATTRS +_mm512_reduce_min_pd(__m512d __V) { + _mm512_reduce_maxMin_64bit(__V, min_pd, d, f); +} + +// Vec512 - Vector with size 512. +// Vec512Neutral - A 512 length vector with elements set to the identity element +// Identity element: {max_epi,0x8000000000000000} +// {max_epu,0x0000000000000000} +// {max_pd, 0xFFF0000000000000} +// {min_epi,0x7FFFFFFFFFFFFFFF} +// {min_epu,0xFFFFFFFFFFFFFFFF} +// {min_pd, 0x7FF0000000000000} +// +// IntrinName - Can be one of following: {max|min}_{epi64|epu64|pd} for example: +// __mm512_max_epi64 +// T1 - Can get 'i' for int and 'd' for double.[__m512{i|d}] +// T2 - Can get 'i' for int and 'f' for float. [__v8d{i|f}] +// T3 - Can get 'q' q word and 'pd' for packed double. 
+// [__builtin_ia32_select{q|pd}_512] +// Mask - Intrinsic Mask + +#define _mm512_mask_reduce_maxMin_64bit(Vec512, Vec512Neutral, IntrinName, T1, \ + T2, T3, Mask) \ + __extension__({ \ + Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \ + (__mmask8)Mask, \ + (__v8d##T2)Vec512, \ + (__v8d##T2)Vec512Neutral); \ + _mm512_reduce_maxMin_64bit(Vec512, IntrinName, T1, T2); \ + }) + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) { + _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x8000000000000000), + max_epi64, i, i, q, __M); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) { + _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x0000000000000000), + max_epu64, i, i, q, __M); +} + +static __inline__ double __DEFAULT_FN_ATTRS +_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) { + _mm512_mask_reduce_maxMin_64bit(__V, -_mm512_set1_pd(__builtin_inf()), + max_pd, d, f, pd, __M); +} + +static __inline__ long long __DEFAULT_FN_ATTRS +_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) { + _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF), + min_epi64, i, i, q, __M); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) { + _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF), + min_epu64, i, i, q, __M); +} + +static __inline__ double __DEFAULT_FN_ATTRS +_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) { + _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_pd(__builtin_inf()), + min_pd, d, f, pd, __M); +} + +// Vec512 - Vector with size 512. +// IntrinName - Can be one of following: {max|min}_{epi32|epu32|ps} for example: +// __mm512_max_epi32 +// T1 - Can get 'i' for int and ' ' .[__m512{i|}] +// T2 - Can get 'i' for int and 'f' for float.[__v16s{i|f}] + +#define _mm512_reduce_maxMin_32bit(Vec512, IntrinName, T1, T2) __extension__({ \ + Vec512 = _mm512_##IntrinName( \ + (__m512##T1)__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 0, 1, 2, 3, 4, 5, 6, 7, \ + -1, -1, -1, -1, -1, -1, -1, -1), \ + (__m512##T1)__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 8, 9, 10, 11, 12, 13, 14, 15, \ + -1, -1, -1, -1, -1, -1, -1, -1)); \ + Vec512 = _mm512_##IntrinName( \ + (__m512##T1)__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 0, 1, 2, 3, -1, -1, -1, -1, \ + -1, -1, -1, -1, -1, -1, -1, -1), \ + (__m512##T1)__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 4, 5, 6, 7, -1, -1, -1, -1, \ + -1, -1, -1, -1, -1, -1, -1, -1)); \ + Vec512 = _mm512_##IntrinName( \ + (__m512##T1)__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 0, 1, -1, -1, -1, -1, -1, -1, \ + -1, -1, -1, -1, -1, -1, -1, -1), \ + (__m512##T1)__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 2, 3, -1, -1, -1, -1, -1, -1, \ + -1, -1, -1, -1, -1, -1, -1, -1)); \ + Vec512 = _mm512_##IntrinName( \ + (__m512##T1)__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 0, -1, -1, -1, -1, -1, -1, -1, \ + -1, -1, -1, -1, -1, -1, -1, -1), \ + (__m512##T1)__builtin_shufflevector( \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512, \ + 1, -1, -1, -1, -1, -1, -1, -1, \ + -1, -1, -1, -1, -1, -1, -1, -1)); \ + return Vec512[0]; \ + }) + +static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_max_epi32(__m512i a) { + 
_mm512_reduce_maxMin_32bit(a, max_epi32, i, i); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm512_reduce_max_epu32(__m512i a) { + _mm512_reduce_maxMin_32bit(a, max_epu32, i, i); +} + +static __inline__ float __DEFAULT_FN_ATTRS _mm512_reduce_max_ps(__m512 a) { + _mm512_reduce_maxMin_32bit(a, max_ps, , f); +} + +static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_min_epi32(__m512i a) { + _mm512_reduce_maxMin_32bit(a, min_epi32, i, i); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm512_reduce_min_epu32(__m512i a) { + _mm512_reduce_maxMin_32bit(a, min_epu32, i, i); +} + +static __inline__ float __DEFAULT_FN_ATTRS _mm512_reduce_min_ps(__m512 a) { + _mm512_reduce_maxMin_32bit(a, min_ps, , f); +} + +// Vec512 - Vector with size 512. +// Vec512Neutral - A 512 length vector with elements set to the identity element +// Identity element: {max_epi,0x80000000} +// {max_epu,0x00000000} +// {max_ps, 0xFF800000} +// {min_epi,0x7FFFFFFF} +// {min_epu,0xFFFFFFFF} +// {min_ps, 0x7F800000} +// +// IntrinName - Can be one of following: {max|min}_{epi32|epu32|ps} for example: +// __mm512_max_epi32 +// T1 - Can get 'i' for int and ' ' .[__m512{i|}] +// T2 - Can get 'i' for int and 'f' for float.[__v16s{i|f}] +// T3 - Can get 'q' q word and 'pd' for packed double. +// [__builtin_ia32_select{q|pd}_512] +// Mask - Intrinsic Mask + +#define _mm512_mask_reduce_maxMin_32bit(Vec512, Vec512Neutral, IntrinName, T1, \ + T2, T3, Mask) \ + __extension__({ \ + Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \ + (__mmask16)Mask, \ + (__v16s##T2)Vec512, \ + (__v16s##T2)Vec512Neutral); \ + _mm512_reduce_maxMin_32bit(Vec512, IntrinName, T1, T2); \ + }) + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) { + _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x80000000), max_epi32, + i, i, d, __M); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) { + _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x00000000), max_epu32, + i, i, d, __M); +} + +static __inline__ float __DEFAULT_FN_ATTRS +_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) { + _mm512_mask_reduce_maxMin_32bit(__V,-_mm512_set1_ps(__builtin_inff()), max_ps, , f, + ps, __M); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) { + _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x7FFFFFFF), min_epi32, + i, i, d, __M); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) { + _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0xFFFFFFFF), min_epu32, + i, i, d, __M); +} + +static __inline__ float __DEFAULT_FN_ATTRS +_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) { + _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_ps(__builtin_inff()), min_ps, , f, + ps, __M); +} + #undef __DEFAULT_FN_ATTRS #endif // __AVX512FINTRIN_H diff --git a/c_headers/avx512ifmaintrin.h b/c_headers/avx512ifmaintrin.h new file mode 100644 index 000000000..5defbaea8 --- /dev/null +++ b/c_headers/avx512ifmaintrin.h @@ -0,0 +1,92 @@ +/*===------------- avx512ifmaintrin.h - IFMA intrinsics ------------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __IFMAINTRIN_H +#define __IFMAINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma"))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) __Z, + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __W, + (__v8di) __X, + (__v8di) __Y, + (__mmask8) __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_madd52hi_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i) __builtin_ia32_vpmadd52huq512_maskz ((__v8di) __X, + (__v8di) __Y, + (__v8di) __Z, + (__mmask8) __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_madd52lo_epu64 (__m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) __Z, + (__mmask8) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __W, + (__v8di) __X, + (__v8di) __Y, + (__mmask8) __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_madd52lo_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i) __builtin_ia32_vpmadd52luq512_maskz ((__v8di) __X, + (__v8di) __Y, + (__v8di) __Z, + (__mmask8) __M); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/avx512ifmavlintrin.h b/c_headers/avx512ifmavlintrin.h new file mode 100644 index 000000000..131ee5cb4 --- /dev/null +++ b/c_headers/avx512ifmavlintrin.h @@ -0,0 +1,149 @@ +/*===------------- avx512ifmavlintrin.h - IFMA intrinsics ------------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * 
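/* Illustrative model, not part of the header: the IFMA intrinsics above take
 * the low 52 bits of each 64-bit lane of __Y and __Z, form their 104-bit
 * product, and add either its low 52 bits (madd52lo) or bits 52..103
 * (madd52hi) to the 64-bit accumulator lane of __X.  Scalar sketch of one
 * lane, using the Clang/GCC __int128 extension: */
#include <stdint.h>
static uint64_t model_madd52lo(uint64_t x, uint64_t y, uint64_t z) {
    const uint64_t m52 = (1ULL << 52) - 1;
    unsigned __int128 p = (unsigned __int128)(y & m52) * (z & m52);
    return x + ((uint64_t)p & m52);          /* accumulate low 52 product bits */
}
static uint64_t model_madd52hi(uint64_t x, uint64_t y, uint64_t z) {
    const uint64_t m52 = (1ULL << 52) - 1;
    unsigned __int128 p = (unsigned __int128)(y & m52) * (z & m52);
    return x + (uint64_t)(p >> 52);          /* accumulate high 52 product bits */
}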
all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __IFMAVLINTRIN_H +#define __IFMAVLINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"))) + + + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_madd52hi_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) __Z, + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __W, + (__v2di) __X, + (__v2di) __Y, + (__mmask8) __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i) __builtin_ia32_vpmadd52huq128_maskz ((__v2di) __X, + (__v2di) __Y, + (__v2di) __Z, + (__mmask8) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_madd52hi_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) __Z, + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __W, + (__v4di) __X, + (__v4di) __Y, + (__mmask8) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i) __builtin_ia32_vpmadd52huq256_maskz ((__v4di) __X, + (__v4di) __Y, + (__v4di) __Z, + (__mmask8) __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_madd52lo_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) __Z, + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __W, + (__v2di) __X, + (__v2di) __Y, + (__mmask8) __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i) __builtin_ia32_vpmadd52luq128_maskz ((__v2di) __X, + (__v2di) __Y, + (__v2di) __Z, + (__mmask8) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_madd52lo_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) __Z, + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) 
+{ + return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __W, + (__v4di) __X, + (__v4di) __Y, + (__mmask8) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_madd52lo_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i) __builtin_ia32_vpmadd52luq256_maskz ((__v4di) __X, + (__v4di) __Y, + (__v4di) __Z, + (__mmask8) __M); +} + + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/avx512pfintrin.h b/c_headers/avx512pfintrin.h new file mode 100644 index 000000000..c7fa3cf31 --- /dev/null +++ b/c_headers/avx512pfintrin.h @@ -0,0 +1,111 @@ +/*===------------- avx512pfintrin.h - PF intrinsics ------------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512PFINTRIN_H +#define __AVX512PFINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512pf"))) + +#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) __extension__ ({\ + __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ + (long long const *)(addr), (int)(scale), \ + (int)(hint)); }) + +#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) __extension__ ({\ + __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \ + (long long const *)(addr), (int)(scale), \ + (int)(hint)); }) + +#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) ({\ + __builtin_ia32_gatherpfdps((__mmask16)(mask), \ + (__v16si)(__m512i)(index), (int const *)(addr), \ + (int)(scale), (int)(hint)); }) + +#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) ({\ + __builtin_ia32_gatherpfdps((__mmask16) -1, \ + (__v16si)(__m512i)(index), (int const *)(addr), \ + (int)(scale), (int)(hint)); }) + +#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) __extension__ ({\ + __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (long long const *)(addr), (int)(scale), \ + (int)(hint)); }) + +#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) __extension__ ({\ + __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \ + (long long const *)(addr), (int)(scale), \ + (int)(hint)); }) + +#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) ({\ + __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (int const *)(addr), (int)(scale), (int)(hint)); }) + +#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) ({\ + __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \ + (int const *)(addr), (int)(scale), (int)(hint)); }) + +#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) __extension__ ({\ + __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \ + (long long *)(addr), (int)(scale), \ + (int)(hint)); }) + +#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) __extension__ ({\ + __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ + (long long *)(addr), (int)(scale), \ + (int)(hint)); }) + +#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) __extension__ ({\ + __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \ + (int *)(addr), (int)(scale), (int)(hint)); }) + +#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) __extension__ ({\ + __builtin_ia32_scatterpfdps((__mmask16)(mask), \ + (__v16si)(__m512i)(index), (int *)(addr), \ + (int)(scale), (int)(hint)); }) + +#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) __extension__ ({\ + __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \ + (long long *)(addr), (int)(scale), \ + (int)(hint)); }) + +#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) __extension__ ({\ + __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (long long *)(addr), (int)(scale), \ + (int)(hint)); }) + +#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) __extension__ ({\ + __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \ + (int *)(addr), (int)(scale), (int)(hint)); }) + +#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) __extension__ ({\ + __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (int *)(addr), 
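/* Illustrative usage sketch, with an assumed caller that is not part of the
 * header: prefetch eight doubles that a later masked gather will touch.  The
 * scale is in bytes per index element and the hint is one of the _MM_HINT_*
 * levels from xmmintrin.h (T0/T1 for these prefetches); compile with
 * AVX-512PF enabled, e.g. -mavx512pf. */
#include <immintrin.h>
static void prefetch_rows(const double *table, __m256i idx, __mmask8 live) {
    _mm512_mask_prefetch_i32gather_pd(idx, live, table, 8, _MM_HINT_T0);
}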
(int)(scale), (int)(hint)); }) + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/avx512vbmiintrin.h b/c_headers/avx512vbmiintrin.h new file mode 100644 index 000000000..837238eda --- /dev/null +++ b/c_headers/avx512vbmiintrin.h @@ -0,0 +1,137 @@ +/*===------------- avx512vbmiintrin.h - VBMI intrinsics ------------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __VBMIINTRIN_H +#define __VBMIINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi"))) + + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask2_permutex2var_epi8 (__m512i __A, __m512i __I, + __mmask64 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermi2varqi512_mask ((__v64qi) __A, + (__v64qi) __I + /* idx */ , + (__v64qi) __B, + (__mmask64) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutex2var_epi8 (__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I + /* idx */ , + (__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutex2var_epi8 (__m512i __A, __mmask64 __U, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I + /* idx */ , + (__v64qi) __A, + (__v64qi) __B, + (__mmask64) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutex2var_epi8 (__mmask64 __U, __m512i __A, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varqi512_maskz ((__v64qi) __I + /* idx */ , + (__v64qi) __A, + (__v64qi) __B, + (__mmask64) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutexvar_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B, + (__v64qi) __A, + (__v64qi) _mm512_undefined_epi32 (), + (__mmask64) -1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B, + (__v64qi) __A, + (__v64qi) _mm512_setzero_si512(), + (__mmask64) __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, 
__m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B, + (__v64qi) __A, + (__v64qi) __W, + (__mmask64) __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_multishift_epi64_epi8 (__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v64qi) __W, + (__mmask64) __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_multishift_epi64_epi8 (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v64qi) _mm512_setzero_si512 (), + (__mmask64) __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_multishift_epi64_epi8 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v64qi) _mm512_undefined_epi32 (), + (__mmask64) -1); +} + + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/avx512vbmivlintrin.h b/c_headers/avx512vbmivlintrin.h new file mode 100644 index 000000000..105c6d142 --- /dev/null +++ b/c_headers/avx512vbmivlintrin.h @@ -0,0 +1,247 @@ +/*===------------- avx512vbmivlintrin.h - VBMI intrinsics ------------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __VBMIVLINTRIN_H +#define __VBMIVLINTRIN_H + +/* Define the default attributes for the functions in this file. 
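/* Illustrative model, not part of the header: _mm512_permutexvar_epi8 (VPERMB)
 * is a full-width byte shuffle.  Each of the 64 result bytes is the byte of
 * the data vector __B selected by the low six bits of the corresponding index
 * byte in __A.  Scalar sketch: */
#include <stdint.h>
static void model_permutexvar_epi8(uint8_t dst[64], const uint8_t idx[64],
                                   const uint8_t src[64]) {
    for (int i = 0; i < 64; ++i)
        dst[i] = src[idx[i] & 0x3f];         /* index wraps at 64 bytes */
}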
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl"))) + + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask2_permutex2var_epi8 (__m128i __A, __m128i __I, __mmask16 __U, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermi2varqi128_mask ((__v16qi) __A, + (__v16qi) __I + /* idx */ , + (__v16qi) __B, + (__mmask16) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask2_permutex2var_epi8 (__m256i __A, __m256i __I, + __mmask32 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermi2varqi256_mask ((__v32qi) __A, + (__v32qi) __I + /* idx */ , + (__v32qi) __B, + (__mmask32) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_permutex2var_epi8 (__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I + /* idx */ , + (__v16qi) __A, + (__v16qi) __B, + (__mmask16) - + 1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_permutex2var_epi8 (__m128i __A, __mmask16 __U, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I + /* idx */ , + (__v16qi) __A, + (__v16qi) __B, + (__mmask16) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_permutex2var_epi8 (__mmask16 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varqi128_maskz ((__v16qi) __I + /* idx */ , + (__v16qi) __A, + (__v16qi) __B, + (__mmask16) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_permutex2var_epi8 (__m256i __A, __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I + /* idx */ , + (__v32qi) __A, + (__v32qi) __B, + (__mmask32) - + 1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_permutex2var_epi8 (__m256i __A, __mmask32 __U, + __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I + /* idx */ , + (__v32qi) __A, + (__v32qi) __B, + (__mmask32) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_permutex2var_epi8 (__mmask32 __U, __m256i __A, + __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varqi256_maskz ((__v32qi) __I + /* idx */ , + (__v32qi) __A, + (__v32qi) __B, + (__mmask32) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_permutexvar_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B, + (__v16qi) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B, + (__v16qi) __A, + (__v16qi) _mm_setzero_si128 (), + (__mmask16) __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B, + (__v16qi) __A, + (__v16qi) __W, + (__mmask16) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_permutexvar_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B, + (__v32qi) __A, + (__v32qi) _mm256_undefined_si256 (), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B, + (__v32qi) __A, + 
(__v32qi) _mm256_setzero_si256 (), + (__mmask32) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B, + (__v32qi) __A, + (__v32qi) __W, + (__mmask32) __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_multishift_epi64_epi8 (__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X, + (__v16qi) __Y, + (__v16qi) __W, + (__mmask16) __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_multishift_epi64_epi8 (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X, + (__v16qi) __Y, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_multishift_epi64_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X, + (__v16qi) __Y, + (__v16qi) + _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_multishift_epi64_epi8 (__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X, + (__v32qi) __Y, + (__v32qi) __W, + (__mmask32) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_multishift_epi64_epi8 (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X, + (__v32qi) __Y, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_multishift_epi64_epi8 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X, + (__v32qi) __Y, + (__v32qi) + _mm256_undefined_si256 (), + (__mmask32) -1); +} + + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/avx512vlbwintrin.h b/c_headers/avx512vlbwintrin.h index 74ec17583..3b58d0433 100644 --- a/c_headers/avx512vlbwintrin.h +++ b/c_headers/avx512vlbwintrin.h @@ -1,4 +1,4 @@ -/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ----------=== +/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------=== * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -29,7 +29,12 @@ #define __AVX512VLBWINTRIN_H /* Define the default attributes for the functions in this file. 
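/* Illustrative model, not part of the header: the multishift intrinsics above
 * (VPMULTISHIFTQB) build destination byte j of each 64-bit lane from eight
 * bits of the data lane, starting at the bit offset given by control byte j
 * (taken modulo 64) and wrapping around the lane.  Scalar sketch of one lane,
 * with __X as control and __Y as data as in the wrappers above: */
#include <stdint.h>
static uint64_t model_multishift_qword(uint64_t ctrl, uint64_t data) {
    uint64_t out = 0;
    for (int j = 0; j < 8; ++j) {
        unsigned off = (unsigned)(ctrl >> (8 * j)) & 0x3f;
        uint64_t rot = off ? (data >> off) | (data << (64 - off)) : data;
        out |= (rot & 0xff) << (8 * j);      /* take 8 bits starting at 'off' */
    }
    return out;
}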
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"))) + +static __inline __m128i __DEFAULT_FN_ATTRS +_mm_setzero_hi(void){ + return (__m128i)(__v8hi){ 0, 0, 0, 0, 0, 0, 0, 0 }; +} /* Integer compare */ @@ -610,1138 +615,974 @@ _mm256_mask_cmpneq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) { } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){ - return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __U); +_mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_add_epi8(__A, __B), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) - _mm256_setzero_si256 (), - (__mmask32) __U); +_mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_add_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __U); +_mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_add_epi16(__A, __B), + (__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) - _mm256_setzero_si256 (), - (__mmask16) __U); +_mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_add_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __U); +_mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_sub_epi8(__A, __B), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) - _mm256_setzero_si256 (), - (__mmask32) __U); +_mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_sub_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __U); +_mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sub_epi16(__A, __B), + 
(__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) - _mm256_setzero_si256 (), - (__mmask16) __U); -} -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __U); +_mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sub_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) - _mm_setzero_si128 (), - (__mmask16) __U); +_mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_add_epi8(__A, __B), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __U); +_mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_add_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_add_epi16 (__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) - _mm_setzero_si128 (), - (__mmask8) __U); +_mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_add_epi16(__A, __B), + (__v8hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __U); +_mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_add_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) - _mm_setzero_si128 (), - (__mmask16) __U); +_mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_sub_epi8(__A, __B), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __U); +_mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_sub_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) - _mm_setzero_si128 (), - (__mmask8) __U); 
+_mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sub_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sub_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __U); +_mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mullo_epi16(__A, __B), + (__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) - _mm256_setzero_si256 (), - (__mmask16) __U); +_mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mullo_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __U); +_mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mullo_epi16(__A, __B), + (__v8hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) - _mm_setzero_si128 (), - (__mmask8) __U); +_mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mullo_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W) { - return (__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) __A, - (__v16qi) __W, - (__mmask16) __U); + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __W, + (__v16qi) __A); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W) { - return (__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) __A, + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, (__v32qi) __W, - (__mmask32) __U); + (__v32qi) __A); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W) { - return (__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) __A, + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, (__v8hi) __W, - (__mmask8) __U); + (__v8hi) __A); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W) { - return (__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) __A, + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, (__v16hi) __W, - (__mmask16) __U); + (__v16hi) __A); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_abs_epi8 (__m128i __W, __mmask16 __U, __m128i __A) +_mm_mask_abs_epi8(__m128i __W, 
__mmask16 __U, __m128i __A) { - return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A, - (__v16qi) __W, - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_abs_epi8(__A), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_abs_epi8 (__mmask16 __U, __m128i __A) +_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) { - return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_abs_epi8(__A), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_abs_epi8 (__m256i __W, __mmask32 __U, __m256i __A) +_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) { - return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A, - (__v32qi) __W, - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_abs_epi8(__A), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A) { - return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_abs_epi8(__A), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_abs_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) { - return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A, - (__v8hi) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_abs_epi16(__A), + (__v8hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_abs_epi16 (__mmask8 __U, __m128i __A) +_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) { - return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_abs_epi16(__A), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_abs_epi16 (__m256i __W, __mmask16 __U, __m256i __A) +_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) { - return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A, - (__v16hi) __W, - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_abs_epi16(__A), + (__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_abs_epi16 (__mmask16 __U, __m256i __A) +_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) { - return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_abs_epi16(__A), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_packs_epi32 (__mmask8 __M, __m128i __A, __m128i __B) -{ - return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A, - (__v4si) __B, - (__v8hi) _mm_setzero_si128 (), __M); +_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packs_epi32(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_packs_epi32 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_packs_epi32(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { - 
return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A, - (__v4si) __B, - (__v8hi) __W, __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packs_epi32(__A, __B), + (__v8hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_packs_epi32 (__mmask16 __M, __m256i __A, __m256i __B) +_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A, - (__v8si) __B, - (__v16hi) _mm256_setzero_si256 (), - __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packs_epi32(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_packs_epi32 (__m256i __W, __mmask16 __M, __m256i __A, - __m256i __B) +_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A, - (__v8si) __B, - (__v16hi) __W, __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packs_epi32(__A, __B), + (__v16hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_packs_epi16 (__mmask16 __M, __m128i __A, __m128i __B) +_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v16qi) _mm_setzero_si128 (), - __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packs_epi16(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_packs_epi16 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v16qi) __W, - __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packs_epi16(__A, __B), + (__v16qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_packs_epi16 (__mmask32 __M, __m256i __A, __m256i __B) +_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v32qi) _mm256_setzero_si256 (), - __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packs_epi16(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_packs_epi16 (__m256i __W, __mmask32 __M, __m256i __A, - __m256i __B) +_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v32qi) __W, - __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packs_epi16(__A, __B), + (__v32qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_packus_epi32 (__mmask8 __M, __m128i __A, __m128i __B) +_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A, - (__v4si) __B, - (__v8hi) _mm_setzero_si128 (), - __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packus_epi32(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_packus_epi32 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_packus_epi32(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A, - 
(__v4si) __B, - (__v8hi) __W, __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packus_epi32(__A, __B), + (__v8hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_packus_epi32 (__mmask16 __M, __m256i __A, __m256i __B) +_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A, - (__v8si) __B, - (__v16hi) _mm256_setzero_si256 (), - __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packus_epi32(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_packus_epi32 (__m256i __W, __mmask16 __M, __m256i __A, - __m256i __B) +_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A, - (__v8si) __B, - (__v16hi) __W, - __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packus_epi32(__A, __B), + (__v16hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_packus_epi16 (__mmask16 __M, __m128i __A, __m128i __B) +_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v16qi) _mm_setzero_si128 (), - __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packus_epi16(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_packus_epi16 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v16qi) __W, - __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packus_epi16(__A, __B), + (__v16qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_packus_epi16 (__mmask32 __M, __m256i __A, __m256i __B) +_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v32qi) _mm256_setzero_si256 (), - __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packus_epi16(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_packus_epi16 (__m256i __W, __mmask32 __M, __m256i __A, - __m256i __B) +_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v32qi) __W, - __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packus_epi16(__A, __B), + (__v32qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_adds_epi8 (__m128i __W, __mmask16 __U, __m128i __A, - __m128i __B) +_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epi8(__A, __B), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_adds_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __U); + return 
(__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_adds_epi8 (__m256i __W, __mmask32 __U, __m256i __A, - __m256i __B) +_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epi8(__A, __B), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_adds_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_adds_epi16 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epi16(__A, __B), + (__v8hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_adds_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_adds_epi16 (__m256i __W, __mmask16 __U, __m256i __A, - __m256i __B) +_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epi16(__A, __B), + (__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_adds_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_adds_epu8 (__m128i __W, __mmask16 __U, __m128i __A, - __m128i __B) +_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epu8(__A, __B), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_adds_epu8 (__mmask16 __U, __m128i __A, __m128i __B) +_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __U); + return 
(__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_adds_epu8 (__m256i __W, __mmask32 __U, __m256i __A, - __m256i __B) +_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epu8(__A, __B), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_adds_epu8 (__mmask32 __U, __m256i __A, __m256i __B) +_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_adds_epu16 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epu16(__A, __B), + (__v8hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_adds_epu16 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_adds_epu16 (__m256i __W, __mmask16 __U, __m256i __A, - __m256i __B) +_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epu16(__A, __B), + (__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_adds_epu16 (__mmask16 __U, __m256i __A, __m256i __B) +_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_avg_epu8 (__m128i __W, __mmask16 __U, __m128i __A, - __m128i __B) +_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_avg_epu8(__A, __B), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_avg_epu8 (__mmask16 __U, __m128i __A, __m128i __B) +_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __U); + return 
(__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_avg_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_avg_epu8 (__m256i __W, __mmask32 __U, __m256i __A, - __m256i __B) +_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_avg_epu8(__A, __B), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_avg_epu8 (__mmask32 __U, __m256i __A, __m256i __B) +_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_avg_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_avg_epu16 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_avg_epu16(__A, __B), + (__v8hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_avg_epu16 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_avg_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_avg_epu16 (__m256i __W, __mmask16 __U, __m256i __A, - __m256i __B) +_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_avg_epu16(__A, __B), + (__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_avg_epu16 (__mmask16 __U, __m256i __A, __m256i __B) +_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_avg_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_max_epi8 (__mmask16 __M, __m128i __A, __m128i __B) +_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_max_epi8 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __M); + return 
(__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epi8(__A, __B), + (__v16qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_max_epi8 (__mmask32 __M, __m256i __A, __m256i __B) +_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_max_epi8 (__m256i __W, __mmask32 __M, __m256i __A, - __m256i __B) +_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epi8(__A, __B), + (__v32qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_max_epi16 (__mmask8 __M, __m128i __A, __m128i __B) +_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_max_epi16 (__m128i __W, __mmask8 __M, __m128i __A, - __m128i __B) +_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epi16(__A, __B), + (__v8hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_max_epi16 (__mmask16 __M, __m256i __A, __m256i __B) +_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_max_epi16 (__m256i __W, __mmask16 __M, __m256i __A, - __m256i __B) +_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epi16(__A, __B), + (__v16hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_max_epu8 (__mmask16 __M, __m128i __A, __m128i __B) +_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_max_epu8 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __M); + return 
(__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epu8(__A, __B), + (__v16qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_max_epu8 (__m256i __W, __mmask32 __M, __m256i __A, - __m256i __B) +_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epu8(__A, __B), + (__v32qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_max_epu16 (__mmask8 __M, __m128i __A, __m128i __B) +_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_max_epu16 (__m128i __W, __mmask8 __M, __m128i __A, - __m128i __B) +_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epu16(__A, __B), + (__v8hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_max_epu16 (__mmask16 __M, __m256i __A, __m256i __B) +_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_max_epu16 (__m256i __W, __mmask16 __M, __m256i __A, - __m256i __B) +_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epu16(__A, __B), + (__v16hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_min_epi8 (__mmask16 __M, __m128i __A, __m128i __B) +_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_min_epi8 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epi8(__A, __B), + (__v16qi)__W); } 
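/*
 * Illustrative usage sketch (editorial addition, not part of the patched
 * header): the _mm_mask_* intrinsics above merge unselected lanes from a
 * pass-through operand, while the _mm_maskz_* forms zero them. Helper names
 * below are hypothetical; assumes <immintrin.h> and a target with
 * AVX512BW + AVX512VL (e.g. compiled with -mavx512bw -mavx512vl).
 */
#include <immintrin.h>

static inline __m128i blend_saturating_add_u8(__m128i fallback, __mmask16 k,
                                              __m128i a, __m128i b)
{
    /* Byte lanes selected by k receive the unsigned saturated sum a + b;
       the remaining lanes keep their value from `fallback`. */
    return _mm_mask_adds_epu8(fallback, k, a, b);
}

static inline __m128i zeroing_saturating_add_u8(__mmask16 k, __m128i a,
                                                __m128i b)
{
    /* Same operation, but lanes not selected by k are zeroed instead. */
    return _mm_maskz_adds_epu8(k, a, b);
}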
static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_min_epi8 (__mmask32 __M, __m256i __A, __m256i __B) +_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_min_epi8 (__m256i __W, __mmask32 __M, __m256i __A, - __m256i __B) +_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epi8(__A, __B), + (__v32qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_min_epi16 (__mmask8 __M, __m128i __A, __m128i __B) +_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_min_epi16 (__m128i __W, __mmask8 __M, __m128i __A, - __m128i __B) +_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epi16(__A, __B), + (__v8hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_min_epi16 (__mmask16 __M, __m256i __A, __m256i __B) +_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_min_epi16 (__m256i __W, __mmask16 __M, __m256i __A, - __m256i __B) +_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epi16(__A, __B), + (__v16hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_min_epu8 (__mmask16 __M, __m128i __A, __m128i __B) +_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_min_epu8 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __M); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epu8(__A, __B), + (__v16qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS 
_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_min_epu8 (__m256i __W, __mmask32 __M, __m256i __A, - __m256i __B) +_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __M); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epu8(__A, __B), + (__v32qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_min_epu16 (__mmask8 __M, __m128i __A, __m128i __B) +_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_min_epu16 (__m128i __W, __mmask8 __M, __m128i __A, - __m128i __B) +_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __M); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epu16(__A, __B), + (__v8hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_min_epu16 (__mmask16 __M, __m256i __A, __m256i __B) +_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_min_epu16 (__m256i __W, __mmask16 __M, __m256i __A, - __m256i __B) +_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __M); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epu16(__A, __B), + (__v16hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_shuffle_epi8 (__m128i __W, __mmask16 __U, __m128i __A, - __m128i __B) +_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_shuffle_epi8(__A, __B), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_shuffle_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_shuffle_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_shuffle_epi8 (__m256i __W, __mmask32 __U, __m256i __A, - __m256i __B) 
+_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_shuffle_epi8(__A, __B), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_shuffle_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_shuffle_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_subs_epi8 (__m128i __W, __mmask16 __U, __m128i __A, - __m128i __B) +_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epi8(__A, __B), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_subs_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_subs_epi8 (__m256i __W, __mmask32 __U, __m256i __A, - __m256i __B) +_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epi8(__A, __B), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_subs_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_subs_epi16 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epi16(__A, __B), + (__v8hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_subs_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_subs_epi16 (__m256i __W, __mmask16 __U, __m256i __A, - __m256i __B) 
+_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epi16(__A, __B), + (__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_subs_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_subs_epu8 (__m128i __W, __mmask16 __U, __m128i __A, - __m128i __B) +_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) __W, - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epu8(__A, __B), + (__v16qi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_subs_epu8 (__mmask16 __U, __m128i __A, __m128i __B) +_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A, - (__v16qi) __B, - (__v16qi) _mm_setzero_si128 (), - (__mmask16) __U); + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_subs_epu8 (__m256i __W, __mmask32 __U, __m256i __A, - __m256i __B) +_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) __W, - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epu8(__A, __B), + (__v32qi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_subs_epu8 (__mmask32 __U, __m256i __A, __m256i __B) +_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A, - (__v32qi) __B, - (__v32qi) _mm256_setzero_si256 (), - (__mmask32) __U); + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_subs_epu16 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epu16(__A, __B), + (__v8hi)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_subs_epu16 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A, - (__v8hi) __B, - (__v8hi) _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_subs_epu16 (__m256i __W, __mmask16 __U, __m256i __A, - __m256i __B) -{ - return 
(__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) __W, - (__mmask16) __U); +_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epu16(__A, __B), + (__v16hi)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_subs_epu16 (__mmask16 __U, __m256i __A, __m256i __B) +_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A, - (__v16hi) __B, - (__v16hi) _mm256_setzero_si256 (), - (__mmask16) __U); + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS @@ -1822,85 +1663,1509 @@ _mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A, (__mmask16) __U); } +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_maddubs_epi16(__X, __Y), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_maddubs_epi16(__X, __Y), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X, + __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_maddubs_epi16(__X, __Y), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_maddubs_epi16(__X, __Y), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_madd_epi16(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_madd_epi16(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_madd_epi16(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_madd_epi16(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtsepi16_epi8 (__m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static 
__inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtsepi16_epi8 (__m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtusepi16_epi8 (__m128i __A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtusepi16_epi8 (__m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi16_epi8 (__m128i __A) { + + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovwb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovuswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtepi16_epi8 (__m256i __A) { + return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A, + (__v16qi) __O, + __M); +} + +static 
__inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A) +{ + __builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A) +{ + __builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhrs_epi16(__X, __Y), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhrs_epi16(__X, __Y), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhrs_epi16(__X, __Y), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhrs_epi16(__X, __Y), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epi16(__A, __B), + (__v16hi)__W); +} + +static 
__inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpackhi_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpackhi_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpackhi_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpackhi_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpackhi_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpackhi_epi16(__A, __B), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpackhi_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpackhi_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpacklo_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpacklo_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpacklo_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpacklo_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpacklo_epi16(__A, __B), + (__v8hi)__W); +} + 
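/*
 * Illustrative usage sketch (editorial addition, not part of the patched
 * header) for the down-converting _mm_cvtepi16_epi8 family defined above:
 * each 16-bit lane is truncated to 8 bits, with mask, maskz and masked
 * storeu variants. The helper name is hypothetical; assumes <immintrin.h>
 * and AVX512BW + AVX512VL.
 */
#include <immintrin.h>

static inline __m128i truncate_words_to_bytes(unsigned char *dst, __mmask8 k,
                                              __m128i words)
{
    /* Store only the truncated bytes whose word lanes are selected by k. */
    _mm_mask_cvtepi16_storeu_epi8(dst, k, words);
    /* Return the truncation in register form: the low 8 byte lanes hold the
       selected results, every other byte lane is zero. */
    return _mm_maskz_cvtepi16_epi8(k, words);
}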
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpacklo_epi16(__A, __B), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpacklo_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpacklo_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepi8_epi16(__A), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepi8_epi16(__A), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepi8_epi16(__A), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepi8_epi16(__A), + (__v16hi)_mm256_setzero_si256()); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepu8_epi16(__A), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepu8_epi16(__A), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepu8_epi16(__A), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepu8_epi16(__A), + (__v16hi)_mm256_setzero_si256()); +} + + #define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \ (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \ - (__v16qi)(__m128i)(b), \ - (p), (__mmask16)-1); }) + (__v16qi)(__m128i)(b), (int)(p), \ + (__mmask16)-1); }) #define _mm_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \ (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \ - (__v16qi)(__m128i)(b), \ - (p), (__mmask16)(m)); }) + (__v16qi)(__m128i)(b), (int)(p), \ + (__mmask16)(m)); }) #define _mm_cmp_epu8_mask(a, b, p) __extension__ ({ \ (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \ - (__v16qi)(__m128i)(b), \ - (p), (__mmask16)-1); }) + (__v16qi)(__m128i)(b), (int)(p), \ + (__mmask16)-1); }) #define _mm_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \ (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \ - (__v16qi)(__m128i)(b), \ - (p), (__mmask16)(m)); }) + 
(__v16qi)(__m128i)(b), (int)(p), \ + (__mmask16)(m)); }) #define _mm256_cmp_epi8_mask(a, b, p) __extension__ ({ \ (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \ - (__v32qi)(__m256i)(b), \ - (p), (__mmask32)-1); }) + (__v32qi)(__m256i)(b), (int)(p), \ + (__mmask32)-1); }) #define _mm256_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \ (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \ - (__v32qi)(__m256i)(b), \ - (p), (__mmask32)(m)); }) + (__v32qi)(__m256i)(b), (int)(p), \ + (__mmask32)(m)); }) #define _mm256_cmp_epu8_mask(a, b, p) __extension__ ({ \ (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \ - (__v32qi)(__m256i)(b), \ - (p), (__mmask32)-1); }) + (__v32qi)(__m256i)(b), (int)(p), \ + (__mmask32)-1); }) #define _mm256_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \ (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \ - (__v32qi)(__m256i)(b), \ - (p), (__mmask32)(m)); }) + (__v32qi)(__m256i)(b), (int)(p), \ + (__mmask32)(m)); }) #define _mm_cmp_epi16_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \ - (__v8hi)(__m128i)(b), \ - (p), (__mmask8)-1); }) + (__v8hi)(__m128i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \ - (__v8hi)(__m128i)(b), \ - (p), (__mmask8)(m)); }) + (__v8hi)(__m128i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm_cmp_epu16_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \ - (__v8hi)(__m128i)(b), \ - (p), (__mmask8)-1); }) + (__v8hi)(__m128i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \ - (__v8hi)(__m128i)(b), \ - (p), (__mmask8)(m)); }) + (__v8hi)(__m128i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm256_cmp_epi16_mask(a, b, p) __extension__ ({ \ (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \ - (__v16hi)(__m256i)(b), \ - (p), (__mmask16)-1); }) + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)-1); }) #define _mm256_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \ (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \ - (__v16hi)(__m256i)(b), \ - (p), (__mmask16)(m)); }) + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)(m)); }) #define _mm256_cmp_epu16_mask(a, b, p) __extension__ ({ \ (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \ - (__v16hi)(__m256i)(b), \ - (p), (__mmask16)-1); }) + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)-1); }) #define _mm256_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \ (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \ - (__v16hi)(__m256i)(b), \ - (p), (__mmask16)(m)); }) + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)(m)); }) + +#define _mm_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflehi_epi16((A), (imm)), \ + (__v8hi)(__m128i)(W)); }) + +#define _mm_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflehi_epi16((A), (imm)), \ + (__v8hi)_mm_setzero_hi()); }) + +#define _mm256_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \ + (__v16hi)(__m256i)(W)); }) + +#define _mm256_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \ + 
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \ + (__v16hi)_mm256_setzero_si256()); }) + +#define _mm_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflelo_epi16((A), (imm)), \ + (__v8hi)(__m128i)(W)); }) + +#define _mm_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflelo_epi16((A), (imm)), \ + (__v8hi)_mm_setzero_hi()); }) + +#define _mm256_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflelo_epi16((A), \ + (imm)), \ + (__v16hi)(__m256i)(W)); }) + +#define _mm256_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflelo_epi16((A), \ + (imm)), \ + (__v16hi)_mm256_setzero_si256()); }) + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_sllv_epi16(__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psllv16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sllv_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sllv_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sllv_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllv8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sllv_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sllv_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sll_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sll_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sll_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sll_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_slli_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS +_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_slli_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_slli_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_slli_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_srlv_epi16(__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psrlv16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srlv_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srlv_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srlv_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlv8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srlv_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srlv_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_srav_epi16(__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psrav16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srav_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srav_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srav_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrav8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srav_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srav_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + 
(__v8hi)_mm_sra_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sra_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sra_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sra_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srai_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srai_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srai_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srai_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srl_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srl_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srl_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srl_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srli_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srli_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srli_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS 
+_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srli_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, + (__v8hi) __A, + (__v8hi) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, + (__v8hi) __A, + (__v8hi) _mm_setzero_hi ()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, + (__v16hi) __A, + (__v16hi) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, + (__v16hi) __A, + (__v16hi) _mm256_setzero_si256 ()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __A, + (__v16qi) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __A, + (__v16qi) _mm_setzero_hi ()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, + (__v32qi) __A, + (__v32qi) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, + (__v32qi) __A, + (__v32qi) _mm256_setzero_si256 ()); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A) +{ + return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_set1_epi8 (__mmask16 __M, char __A) +{ + return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A) +{ + return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A, + (__v32qi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_set1_epi8 (__mmask32 __M, char __A) +{ + return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A, + (__v32qi) + _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P, + (__v8hi) + _mm_setzero_hi (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P, + (__v16hi) __W, + (__mmask16) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_epi16 (__mmask16 __U, 
void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P, + (__v16qi) __W, + (__mmask16) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P, + (__v32qi) __W, + (__mmask32) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedquhi128_mask ((__v8hi *) __P, + (__v8hi) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A) +{ + __builtin_ia32_storedquhi256_mask ((__v16hi *) __P, + (__v16hi) __A, + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A) +{ + __builtin_ia32_storedquqi128_mask ((__v16qi *) __P, + (__v16qi) __A, + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A) +{ + __builtin_ia32_storedquqi256_mask ((__v32qi *) __P, + (__v32qi) __A, + (__mmask32) __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm_test_epi8_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A, + (__v16qi) __B, + (__mmask16) -1); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A, + (__v16qi) __B, __U); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm256_test_epi8_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A, + (__v32qi) __B, + (__mmask32) -1); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A, + (__v32qi) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_test_epi16_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A, + (__v8hi) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A, + (__v8hi) __B, __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm256_test_epi16_mask (__m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A, + (__v16hi) __B, + (__mmask16) -1); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A, + (__v16hi) __B, __U); +} 
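/*
 * Illustrative usage sketch (editorial addition, not part of the patched
 * header) combining the masked load, compare and mask-producing intrinsics
 * above. The helper name is hypothetical; assumes <immintrin.h>,
 * AVX512BW + AVX512VL, and the standard _MM_CMPINT_LT predicate.
 */
#include <immintrin.h>

static inline __mmask8 words_below_threshold(const short *src, __mmask8 valid,
                                             __m128i threshold)
{
    /* Load only the 16-bit lanes marked valid (the rest are zeroed), then
       compare each loaded lane against the threshold under the same mask. */
    __m128i v = _mm_maskz_loadu_epi16(valid, src);
    return _mm_mask_cmp_epi16_mask(valid, v, threshold, _MM_CMPINT_LT);
}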
+ +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm_testn_epi8_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A, + (__v16qi) __B, + (__mmask16) -1); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A, + (__v16qi) __B, __U); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm256_testn_epi8_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A, + (__v32qi) __B, + (__mmask32) -1); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A, + (__v32qi) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_testn_epi16_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A, + (__v8hi) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A, + (__v8hi) __B, __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm256_testn_epi16_mask (__m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A, + (__v16hi) __B, + (__mmask16) -1); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A, + (__v16hi) __B, __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm_movepi8_mask (__m128i __A) +{ + return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm256_movepi8_mask (__m256i __A) +{ + return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_movepi16_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm256_movepi16_mask (__m256i __A) +{ + return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_movm_epi8 (__mmask16 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2b128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_movm_epi8 (__mmask32 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2b256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_movm_epi16 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2w128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_movm_epi16 (__mmask16 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2w256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128(__M, + (__v16qi) _mm_broadcastb_epi8(__A), + (__v16qi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128(__M, + (__v16qi) _mm_broadcastb_epi8(__A), + (__v16qi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_broadcastb_epi8(__A), + (__v32qi) __O); +} + +static 
__inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_broadcastb_epi8(__A), + (__v32qi) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128(__M, + (__v8hi) _mm_broadcastw_epi16(__A), + (__v8hi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128(__M, + (__v8hi) _mm_broadcastw_epi16(__A), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256(__M, + (__v16hi) _mm256_broadcastw_epi16(__A), + (__v16hi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256(__M, + (__v16hi) _mm256_broadcastw_epi16(__A), + (__v16hi) _mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A) +{ + return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A, + (__v16hi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_set1_epi16 (__mmask16 __M, short __A) +{ + return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A) +{ + return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A, + (__v8hi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_set1_epi16 (__mmask8 __M, short __A) +{ + return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_permutexvar_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B, + (__v8hi) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B, + (__v8hi) __A, + (__v8hi) _mm_setzero_si128 (), + (__mmask8) __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B, + (__v8hi) __A, + (__v8hi) __W, + (__mmask8) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_permutexvar_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B, + (__v16hi) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B, + (__v16hi) __A, + (__v16hi) _mm256_setzero_si256 (), + (__mmask16) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B, + (__v16hi) __A, + (__v16hi) __W, + (__mmask16) __M); +} + +#define 
_mm_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \ + (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \ + (__v16qi)(__m128i)(W)); }) + +#define _mm_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \ + (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \ + (__v16qi)_mm_setzero_si128()); }) + +#define _mm256_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \ + (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \ + (__v32qi)(__m256i)(W)); }) + +#define _mm256_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \ + (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \ + (__v32qi)_mm256_setzero_si256()); }) + +#define _mm_dbsad_epu8(A, B, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(imm), \ + (__v8hi)_mm_setzero_hi(), \ + (__mmask8)-1); }) + +#define _mm_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(imm), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U)); }) + +#define _mm_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(imm), \ + (__v8hi)_mm_setzero_si128(), \ + (__mmask8)(U)); }) + +#define _mm256_dbsad_epu8(A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), (int)(imm), \ + (__v16hi)_mm256_setzero_si256(), \ + (__mmask16)-1); }) + +#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), (int)(imm), \ + (__v16hi)(__m256i)(W), \ + (__mmask16)(U)); }) + +#define _mm256_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), (int)(imm), \ + (__v16hi)_mm256_setzero_si256(), \ + (__mmask16)(U)); }) #undef __DEFAULT_FN_ATTRS diff --git a/c_headers/avx512vlcdintrin.h b/c_headers/avx512vlcdintrin.h new file mode 100644 index 000000000..7b02e2e1f --- /dev/null +++ b/c_headers/avx512vlcdintrin.h @@ -0,0 +1,263 @@ +/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ---------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VLCDINTRIN_H +#define __AVX512VLCDINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd"))) + + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_broadcastmb128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_broadcastmb256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m128i) __builtin_ia32_broadcastmw128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m256i) __builtin_ia32_broadcastmw256 (__A); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_conflict_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A, + (__v2di) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_di (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_conflict_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A, + (__v4di) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A, + (__v4di) _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_conflict_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A, + (__v4si) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A, + (__v4si) _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_conflict_epi32 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ 
__m256i __DEFAULT_FN_ATTRS +_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_lzcnt_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_lzcnt_epi32 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_lzcnt_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_di (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_di (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_lzcnt_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __AVX512VLCDINTRIN_H */ diff --git a/c_headers/avx512vldqintrin.h b/c_headers/avx512vldqintrin.h index 1edf29d12..cd9da4370 100644 --- a/c_headers/avx512vldqintrin.h +++ b/c_headers/avx512vldqintrin.h @@ -1,4 +1,4 @@ -/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ---------------------------=== +/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ------------=== * * Permission is hereby granted, free of 
charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -20,7 +20,7 @@ * *===-----------------------------------------------------------------------=== */ - + #ifndef __IMMINTRIN_H #error "Never use directly; include instead." #endif @@ -29,325 +29,1172 @@ #define __AVX512VLDQINTRIN_H /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq"))) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_mullo_epi64 (__m256i __A, __m256i __B) { - return (__m256i) ((__v4di) __A * (__v4di) __B); + return (__m256i) ((__v4du) __A * (__v4du) __B); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) __W, - (__mmask8) __U); +_mm256_mask_mullo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_mullo_epi64(__A, __B), + (__v4di)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) - _mm256_setzero_si256 (), - (__mmask8) __U); +_mm256_maskz_mullo_epi64(__mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_mullo_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi64 (__m128i __A, __m128i __B) { - return (__m128i) ((__v2di) __A * (__v2di) __B); + return (__m128i) ((__v2du) __A * (__v2du) __B); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) __W, - (__mmask8) __U); +_mm_mask_mullo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_mullo_epi64(__A, __B), + (__v2di)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) - _mm_setzero_si128 (), - (__mmask8) __U); +_mm_maskz_mullo_epi64(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_mullo_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U); +_mm256_mask_andnot_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_andnot_pd(__A, __B), + (__v4df)__W); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U); +_mm256_maskz_andnot_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return 
(__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_andnot_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) __W, - (__mmask8) __U); +_mm_mask_andnot_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_andnot_pd(__A, __B), + (__v2df)__W); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) __U); +_mm_maskz_andnot_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_andnot_pd(__A, __B), + (__v2df)_mm_setzero_pd()); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U); +_mm256_mask_andnot_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_andnot_ps(__A, __B), + (__v8sf)__W); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U); +_mm256_maskz_andnot_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_andnot_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) __W, - (__mmask8) __U); +_mm_mask_andnot_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_andnot_ps(__A, __B), + (__v4sf)__W); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) __U); +_mm_maskz_andnot_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_andnot_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U); +_mm256_mask_and_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_and_pd(__A, __B), + (__v4df)__W); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U); +_mm256_maskz_and_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_and_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); } static 
__inline__ __m128d __DEFAULT_FN_ATTRS -_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) __W, - (__mmask8) __U); +_mm_mask_and_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_and_pd(__A, __B), + (__v2df)__W); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) __U); +_mm_maskz_and_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_and_pd(__A, __B), + (__v2df)_mm_setzero_pd()); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U); +_mm256_mask_and_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_and_ps(__A, __B), + (__v8sf)__W); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U); +_mm256_maskz_and_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_and_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) __W, - (__mmask8) __U); +_mm_mask_and_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_and_ps(__A, __B), + (__v4sf)__W); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) __U); +_mm_maskz_and_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_and_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A, - __m256d __B) { - return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U); +_mm256_mask_xor_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_xor_pd(__A, __B), + (__v4df)__W); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U); +_mm256_maskz_xor_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_xor_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A, - (__v2df) __B, - 
(__v2df) __W, - (__mmask8) __U); +_mm_mask_xor_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_xor_pd(__A, __B), + (__v2df)__W); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) __U); + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_xor_pd(__A, __B), + (__v2df)_mm_setzero_pd()); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_mask_xor_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U); +_mm256_mask_xor_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_xor_ps(__A, __B), + (__v8sf)__W); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U); +_mm256_maskz_xor_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_xor_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) __W, - (__mmask8) __U); +_mm_mask_xor_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_xor_ps(__A, __B), + (__v4sf)__W); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) __U); +_mm_maskz_xor_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_xor_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U); +_mm256_mask_or_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_or_pd(__A, __B), + (__v4df)__W); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U); +_mm256_maskz_or_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_or_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) __W, - (__mmask8) __U); +_mm_mask_or_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_or_pd(__A, __B), + (__v2df)__W); } static __inline__ __m128d __DEFAULT_FN_ATTRS 
-_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) __U); +_mm_maskz_or_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_or_pd(__A, __B), + (__v2df)_mm_setzero_pd()); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U); +_mm256_mask_or_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_or_ps(__A, __B), + (__v8sf)__W); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U); +_mm256_maskz_or_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_or_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) __W, - (__mmask8) __U); +_mm_mask_or_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_or_ps(__A, __B), + (__v4sf)__W); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) __U); +_mm_maskz_or_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_or_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); } +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtpd_epi64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvtpd_epi64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtpd_epu64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS 
+_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvtpd_epu64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtps_epi64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvtps_epi64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtps_epu64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvtps_epu64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask 
((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtepi64_pd (__m128i __A) { + return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A, + (__v2df) _mm_setzero_pd(), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) { + return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A, + (__v2df) _mm_setzero_pd(), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_cvtepi64_pd (__m256i __A) { + return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A, + (__v4df) _mm256_setzero_pd(), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) { + return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) { + return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A, + (__v4df) _mm256_setzero_pd(), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtepi64_ps (__m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm256_cvtepi64_ps (__m256i __A) { + return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) { + return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) { + return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvttpd_epi64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvttpd_epi64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvttpd_epi64 (__m256i __W, 
__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvttpd_epu64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvttpd_epu64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvttps_epi64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvttps_epi64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvttps_epu64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) 
__A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvttps_epu64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtepu64_pd (__m128i __A) { + return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A, + (__v2df) _mm_setzero_pd(), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) { + return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A, + (__v2df) _mm_setzero_pd(), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_cvtepu64_pd (__m256i __A) { + return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A, + (__v4df) _mm256_setzero_pd(), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) { + return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) { + return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A, + (__v4df) _mm256_setzero_pd(), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtepu64_ps (__m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm256_cvtepu64_ps (__m256i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U); +} + +#define _mm_range_pd(A, B, C) __extension__ ({ \ + (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1); }) + +#define _mm_mask_range_pd(W, U, A, B, C) __extension__ ({ \ + 
(__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)); }) + +#define _mm_maskz_range_pd(U, A, B, C) __extension__ ({ \ + (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm256_range_pd(A, B, C) __extension__ ({ \ + (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1); }) + +#define _mm256_mask_range_pd(W, U, A, B, C) __extension__ ({ \ + (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_range_pd(U, A, B, C) __extension__ ({ \ + (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm_range_ps(A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1); }) + +#define _mm_mask_range_ps(W, U, A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)(__m128)(W), (__mmask8)(U)); }) + +#define _mm_maskz_range_ps(U, A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)); }) + +#define _mm256_range_ps(A, B, C) __extension__ ({ \ + (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1); }) + +#define _mm256_mask_range_ps(W, U, A, B, C) __extension__ ({ \ + (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)(__m256)(W), (__mmask8)(U)); }) + +#define _mm256_maskz_range_ps(U, A, B, C) __extension__ ({ \ + (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)); }) + +#define _mm_reduce_pd(A, B) __extension__ ({ \ + (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1); }) + +#define _mm_mask_reduce_pd(W, U, A, B) __extension__ ({ \ + (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)); }) + +#define _mm_maskz_reduce_pd(U, A, B) __extension__ ({ \ + (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm256_reduce_pd(A, B) __extension__ ({ \ + (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1); }) + +#define _mm256_mask_reduce_pd(W, U, A, B) __extension__ ({ \ + (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_reduce_pd(U, A, B) __extension__ ({ \ + (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm_reduce_ps(A, B) __extension__ ({ \ + (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1); }) + +#define 
_mm_mask_reduce_ps(W, U, A, B) __extension__ ({ \ + (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U)); }) + +#define _mm_maskz_reduce_ps(U, A, B) __extension__ ({ \ + (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)); }) + +#define _mm256_reduce_ps(A, B) __extension__ ({ \ + (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1); }) + +#define _mm256_mask_reduce_ps(W, U, A, B) __extension__ ({ \ + (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_reduce_ps(U, A, B) __extension__ ({ \ + (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)); }) + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_movepi32_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_movepi32_mask (__m256i __A) +{ + return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_movm_epi32 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2d128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_movm_epi32 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2d256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_movm_epi64 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2q128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_movm_epi64 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2q256 (__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_movepi64_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_movepi64_mask (__m256i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_broadcast_f32x2 (__m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A, + (__v8sf)_mm256_undefined_ps(), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A, + (__v8sf) __O, + __M); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A, + (__v8sf) _mm256_setzero_ps (), + __M); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_broadcast_f64x2 (__m128d __A) +{ + return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A, + (__v4df)_mm256_undefined_pd(), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_broadcast_f64x2 (__m256d __O, __mmask8 __M, __m128d __A) +{ + return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A, + (__v4df) __O, + __M); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A) +{ + return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) __A, + (__v4df) _mm256_setzero_ps (), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_broadcast_i32x2 (__m128i __A) +{ + return (__m128i) __builtin_ia32_broadcasti32x2_128_mask 
((__v4si) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A, + (__v4si) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_broadcast_i32x2 (__m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A, + (__v8si)_mm256_undefined_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A, + (__v8si) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_broadcast_i64x2 (__m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A, + (__v4di)_mm256_undefined_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_broadcast_i64x2 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A, + (__v4di) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) __A, + (__v4di) _mm256_setzero_si256 (), + __M); +} + +#define _mm256_extractf64x2_pd(A, imm) __extension__ ({ \ + (__m128d)__builtin_shufflevector((__v4df)(__m256d)(A), \ + (__v4df)_mm256_undefined_pd(), \ + ((imm) & 1) ? 2 : 0, \ + ((imm) & 1) ? 3 : 1); }) + +#define _mm256_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm256_extractf64x2_pd((A), (imm)), \ + (__v2df)(W)); }) + +#define _mm256_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm256_extractf64x2_pd((A), (imm)), \ + (__v2df)_mm_setzero_pd()); }) + +#define _mm256_extracti64x2_epi64(A, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector((__v4di)(__m256i)(A), \ + (__v4di)_mm256_undefined_si256(), \ + ((imm) & 1) ? 2 : 0, \ + ((imm) & 1) ? 3 : 1); }) + +#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm256_extracti64x2_epi64((A), (imm)), \ + (__v2di)(W)); }) + +#define _mm256_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm256_extracti64x2_epi64((A), (imm)), \ + (__v2di)_mm_setzero_di()); }) + +#define _mm256_insertf64x2(A, B, imm) __extension__ ({ \ + (__m256d)__builtin_shufflevector((__v4df)(A), \ + (__v4df)_mm256_castpd128_pd256((__m128d)(B)), \ + ((imm) & 0x1) ? 0 : 4, \ + ((imm) & 0x1) ? 1 : 5, \ + ((imm) & 0x1) ? 4 : 2, \ + ((imm) & 0x1) ? 
5 : 3); }) + +#define _mm256_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_insertf64x2((A), (B), (imm)), \ + (__v4df)(W)); }) + +#define _mm256_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_insertf64x2((A), (B), (imm)), \ + (__v4df)_mm256_setzero_pd()); }) + +#define _mm256_inserti64x2(A, B, imm) __extension__ ({ \ + (__m256i)__builtin_shufflevector((__v4di)(A), \ + (__v4di)_mm256_castsi128_si256((__m128i)(B)), \ + ((imm) & 0x1) ? 0 : 4, \ + ((imm) & 0x1) ? 1 : 5, \ + ((imm) & 0x1) ? 4 : 2, \ + ((imm) & 0x1) ? 5 : 3); }) + +#define _mm256_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_inserti64x2((A), (B), (imm)), \ + (__v4di)(W)); }) + +#define _mm256_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_inserti64x2((A), (B), (imm)), \ + (__v4di)_mm256_setzero_si256()); }) + +#define _mm_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm_fpclass_pd_mask(A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm256_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_fpclass_pd_mask(A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm_fpclass_ps_mask(A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm256_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_fpclass_ps_mask(A, imm) __extension__ ({ \ + (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__mmask8)-1); }) + #undef __DEFAULT_FN_ATTRS #endif diff --git a/c_headers/avx512vlintrin.h b/c_headers/avx512vlintrin.h index fc1b9d6e7..f3744da6a 100644 --- a/c_headers/avx512vlintrin.h +++ b/c_headers/avx512vlintrin.h @@ -28,8 +28,13 @@ #ifndef __AVX512VLINTRIN_H #define __AVX512VLINTRIN_H -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"))) + +/* Doesn't require avx512vl, used in avx512dqintrin.h */ +static __inline __m128i __attribute__((__always_inline__, __nodebug__, __target__("avx512f"))) +_mm_setzero_di(void) { + return (__m128i)(__v2di){ 0LL, 0LL}; +} /* Integer compare */ @@ -226,9 +231,6 @@ _mm256_mask_cmpge_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) { __u); } - - - static __inline__ __mmask8 __DEFAULT_FN_ATTRS _mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) { return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b, @@ -614,711 +616,576 @@ _mm256_mask_cmpneq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) { } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) +_mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) __W, - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_add_epi32(__A, __B), + (__v8si)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_add_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) +_mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) __W, - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_add_epi64(__A, __B), + (__v4di)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) - _mm256_setzero_si256 (), - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_add_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) +_mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) __W, - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sub_epi32(__A, __B), + (__v8si)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sub_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i 
__B) +_mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) __W, - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sub_epi64(__A, __B), + (__v4di)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) - _mm256_setzero_si256 (), - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sub_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_add_epi32(__A, __B), + (__v4si)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_add_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_add_epi64(__A, __B), + (__v2di)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_add_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sub_epi32(__A, __B), + (__v4si)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sub_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) 
__builtin_ia32_psubq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sub_epi64(__A, __B), + (__v2di)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sub_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X, - __m256i __Y) +_mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) { - return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X, - (__v8si) __Y, - (__v4di) __W, __M); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epi32(__X, __Y), + (__v4di)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) +_mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y) { - return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X, - (__v8si) __Y, - (__v4di) - _mm256_setzero_si256 (), - __M); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epi32(__X, __Y), + (__v4di)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X, - __m128i __Y) +_mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) { - return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X, - (__v4si) __Y, - (__v2di) __W, __M); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epi32(__X, __Y), + (__v2di)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) +_mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y) { - return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X, - (__v4si) __Y, - (__v2di) - _mm_setzero_si128 (), - __M); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epi32(__X, __Y), + (__v2di)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X, - __m256i __Y) +_mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) { - return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X, - (__v8si) __Y, - (__v4di) __W, __M); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epu32(__X, __Y), + (__v4di)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) +_mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y) { - return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X, - (__v8si) __Y, - (__v4di) - _mm256_setzero_si256 (), - __M); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epu32(__X, __Y), + (__v4di)_mm256_setzero_si256()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X, - __m128i __Y) +_mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) { - return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X, - (__v4si) __Y, - (__v2di) __W, __M); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + 
(__v2di)_mm_mul_epu32(__X, __Y), + (__v2di)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y) +_mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y) { - return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X, - (__v4si) __Y, - (__v2di) - _mm_setzero_si128 (), - __M); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epu32(__X, __Y), + (__v2di)_mm_setzero_si128()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B) +_mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) - _mm256_setzero_si256 (), - __M); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_mullo_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A, - __m256i __B) +_mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) __W, __M); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_mullo_epi32(__A, __B), + (__v8si)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B) +_mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) - _mm_setzero_si128 (), - __M); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_mullo_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_mullo_epi32 (__m128i __W, __mmask16 __M, __m128i __A, - __m128i __B) +_mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) __W, __M); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_mullo_epi32(__A, __B), + (__v4si)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) +_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) __W, - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_and_si256(__A, __B), + (__v8si)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) __U); + return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_and_si128(__A, __B), + (__v4si)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B) 
+_mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) +_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) __W, - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_andnot_si256(__A, __B), + (__v8si)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) __U); + return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(), + __U, __A, __B); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) +_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_andnot_si128(__A, __B), + (__v4si)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) +_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) __W, - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_or_si256(__A, __B), + (__v8si)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) __U); + return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_or_si128(__A, __B), + (__v4si)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B); } static __inline__ __m256i __DEFAULT_FN_ATTRS 
-_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) +_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) __W, - (__mmask8) __U); + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_xor_si256(__A, __B), + (__v8si)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A, - (__v8si) __B, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) __U); + return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A, +_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) __W, - (__mmask8) __U); + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_xor_si128(__A, __B), + (__v4si)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A, - (__v4si) __B, - (__v4si) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) +_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) __W, __U); + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_and_si256(__A, __B), + (__v4di)__W); } static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +_mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B) { - return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) - _mm256_setzero_pd (), - __U); + return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A, +_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_and_si128(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_andnot_si256(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(), + __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_andnot_si128(__A, __B), + (__v2di)__W); +} + 
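/* Editor's note -- illustration only, not part of the patch above.
 * A minimal sketch of the merge-masking semantics that the new
 * __builtin_ia32_select*-based bodies implement: lanes whose mask bit is
 * set take the unmasked AVX2 result, the remaining lanes keep the
 * pass-through operand (or zero for the maskz_ forms).  Assumes a compiler
 * shipping these intrinsics and an AVX-512VL capable CPU; build with e.g.
 * -mavx512f -mavx512vl. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256i w = _mm256_set1_epi32(-1);           /* pass-through values    */
    __m256i a = _mm256_set1_epi32(0x0F0F0F0F);
    __m256i b = _mm256_set1_epi32(0x00FF00FF);
    __mmask8 k = 0xA5;                           /* lanes 0,2,5,7 selected */

    __m256i merged = _mm256_mask_and_epi32(w, k, a, b);  /* w where bit clear */
    __m256i zeroed = _mm256_maskz_and_epi32(k, a, b);    /* 0 where bit clear */

    int m[8], z[8];
    _mm256_storeu_si256((__m256i *)m, merged);
    _mm256_storeu_si256((__m256i *)z, zeroed);
    for (int i = 0; i < 8; ++i)
        printf("lane %d: merged=%08x zeroed=%08x\n", i, m[i], z[i]);
    return 0;
}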
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_or_si256(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_or_si128(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_xor_si256(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) __W, __U); + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_xor_si128(__A, __B), + (__v2di)__W); } static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +_mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B) { - return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) - _mm_setzero_pd (), - __U); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) -{ - return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) __W, __U); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B) -{ - return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) - _mm256_setzero_pd (), - __U); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) -{ - return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) __W, __U); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B) -{ - return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) - _mm_setzero_pd (), - __U); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) -{ - return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) __W, - (__mmask8) __U); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B) -{ - return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) - _mm256_setzero_si256 
(), - (__mmask8) __U); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) -{ - return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) __W, - (__mmask8) __U); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B) -{ - return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) - _mm_setzero_si128 (), - (__mmask8) __U); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A, - __m256i __B) -{ - return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) __W, - (__mmask8) __U); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS -_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B) -{ - return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A, - (__v4di) __B, - (__v4di) - _mm256_setzero_si256 (), - (__mmask8) __U); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A, - __m128i __B) -{ - return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) __W, - (__mmask8) __U); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B) -{ - return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A, - (__v2di) __B, - (__v2di) - _mm_setzero_si128 (), - (__mmask8) __U); + return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B); } #define _mm_cmp_epi32_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ - (__v4si)(__m128i)(b), \ - (p), (__mmask8)-1); }) + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ - (__v4si)(__m128i)(b), \ - (p), (__mmask8)(m)); }) + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm_cmp_epu32_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ - (__v4si)(__m128i)(b), \ - (p), (__mmask8)-1); }) + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ - (__v4si)(__m128i)(b), \ - (p), (__mmask8)(m)); }) + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm256_cmp_epi32_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ - (__v8si)(__m256i)(b), \ - (p), (__mmask8)-1); }) + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm256_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ - (__v8si)(__m256i)(b), \ - (p), (__mmask8)(m)); }) + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm256_cmp_epu32_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ - (__v8si)(__m256i)(b), \ - (p), (__mmask8)-1); }) + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm256_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ - (__v8si)(__m256i)(b), \ - (p), (__mmask8)(m)); }) + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm_cmp_epi64_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ - 
(__v2di)(__m128i)(b), \ - (p), (__mmask8)-1); }) + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ - (__v2di)(__m128i)(b), \ - (p), (__mmask8)(m)); }) + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm_cmp_epu64_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ - (__v2di)(__m128i)(b), \ - (p), (__mmask8)-1); }) + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ - (__v2di)(__m128i)(b), \ - (p), (__mmask8)(m)); }) + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm256_cmp_epi64_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ - (__v4di)(__m256i)(b), \ - (p), (__mmask8)-1); }) + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm256_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ - (__v4di)(__m256i)(b), \ - (p), (__mmask8)(m)); }) + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm256_cmp_epu64_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ - (__v4di)(__m256i)(b), \ - (p), (__mmask8)-1); }) + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm256_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ - (__v4di)(__m256i)(b), \ - (p), (__mmask8)(m)); }) + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm256_cmp_ps_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ - (__v8sf)(__m256)(b), \ - (p), (__mmask8)-1); }) + (__v8sf)(__m256)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm256_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ - (__v8sf)(__m256)(b), \ - (p), (__mmask8)(m)); }) + (__v8sf)(__m256)(b), (int)(p), \ + (__mmask8)(m)); }) #define _mm256_cmp_pd_mask(a, b, p) __extension__ ({ \ - (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256)(a), \ - (__v4df)(__m256)(b), \ - (p), (__mmask8)-1); }) + (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(p), \ + (__mmask8)-1); }) #define _mm256_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \ - (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256)(a), \ - (__v4df)(__m256)(b), \ - (p), (__mmask8)(m)); }) + (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(p), \ + (__mmask8)(m)); }) -#define _mm128_cmp_ps_mask(a, b, p) __extension__ ({ \ +#define _mm_cmp_ps_mask(a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ - (__v4sf)(__m128)(b), \ - (p), (__mmask8)-1); }) + (__v4sf)(__m128)(b), (int)(p), \ + (__mmask8)-1); }) -#define _mm128_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \ +#define _mm_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \ (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ - (__v4sf)(__m128)(b), \ - (p), (__mmask8)(m)); }) + (__v4sf)(__m128)(b), (int)(p), \ + (__mmask8)(m)); }) -#define _mm128_cmp_pd_mask(a, b, p) __extension__ ({ \ - (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128)(a), \ - (__v2df)(__m128)(b), \ - (p), (__mmask8)-1); }) +#define _mm_cmp_pd_mask(a, b, p) 
__extension__ ({ \ + (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (int)(p), \ + (__mmask8)-1); }) -#define _mm128_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \ - (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128)(a), \ - (__v2df)(__m128)(b), \ - (p), (__mmask8)(m)); }) +#define _mm_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (int)(p), \ + (__mmask8)(m)); }) static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) @@ -1977,6 +1844,7089 @@ _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) (__mmask8) __U); } +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_add_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_add_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_add_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_add_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_add_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_add_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_add_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_add_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) { + return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U, + (__v4si) __W, + (__v4si) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) { + return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U, + (__v8si) __W, + (__v8si) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) { + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __W, + (__v2df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) { + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __W, + (__v4df) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 
__W) { + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, + (__v4sf) __W, + (__v4sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) { + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __W, + (__v8sf) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) { + return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U, + (__v2di) __W, + (__v2di) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) { + return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U, + (__v4di) __W, + (__v4di) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) { + return 
(__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) { + __builtin_ia32_compressstoredf128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) { + __builtin_ia32_compressstoredf256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) { + __builtin_ia32_compressstoredi128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) { + __builtin_ia32_compressstoredi256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) { + __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) { + __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) { + __builtin_ia32_compressstoresi128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) { + __builtin_ia32_compressstoresi256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepi32_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepi32_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepi32_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepi32_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128) 
__builtin_ia32_cvtdq2ps128_mask ((__v4si) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi32_ps (__mmask16 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) { + return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi32_ps (__mmask16 __U, __m256i __A) { + return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) { + return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) { + return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) { + return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) { + return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtpd_epu32 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtpd_epu32 (__m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) __W, + 
(__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) { + return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) { + return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) { + return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) { + return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtps_epu32 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_cvtps_epu32 (__m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS +_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvttpd_epu32 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvttpd_epu32 (__m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvttps_epu32 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i 
__DEFAULT_FN_ATTRS +_mm256_cvttps_epu32 (__m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_cvtepu32_pd (__m128i __A) { + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepu32_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepu32_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_cvtepu32_pd (__m128i __A) { + return (__m256d)__builtin_convertvector((__v4su)__A, __v4df); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepu32_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepu32_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtepu32_ps (__m128i __A) { + return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_cvtepu32_ps (__m256i __A) { + return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) { + return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) { + return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_div_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return 
(__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_div_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_div_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_div_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_div_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_div_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_div_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_div_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) { + return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P, + (__v2df) __W, + (__mmask8) + __U); +} + +static __inline__ 
__m128d __DEFAULT_FN_ATTRS +_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) { + return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) { + return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P, + (__v4df) __W, + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) { + return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P, + (__v2di) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U, + void const *__P) { + return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P, + (__v4di) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) { + return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) { + return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) { + return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) + __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) { + return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) { + return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P, + (__v4si) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U, + void const *__P) { + return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P, + (__v8si) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) { + return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + 
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_getexp_pd (__m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_getexp_pd (__m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_getexp_ps (__m128 __A) { + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) { + return 
(__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_getexp_ps (__m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_max_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_max_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_max_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_max_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_max_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_max_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_max_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_max_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_min_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_min_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_min_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_min_pd(__A, __B), + 
(__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_min_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_min_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_min_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_min_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_mul_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_mul_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_mul_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_mul_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_mul_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_mul_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_mul_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_mul_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_abs_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_abs_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask16)__U, + (__v8si)_mm256_abs_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS 
+_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask16)__U, + (__v8si)_mm256_abs_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_abs_epi64 (__m128i __A) { + return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_abs_epi64 (__m256i __A) { + return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) { + return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_max_epi64 (__m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) { + return (__m256i) __builtin_ia32_pmaxsq256_mask 
((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_max_epi64 (__m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epu32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epu32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epu32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epu32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_max_epu64 (__m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) { + return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_max_epu64 (__m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) { + return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return 
(__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_min_epi64 (__m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) { + return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_min_epi64 (__m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) { + return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epu32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epu32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epu32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epu32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_min_epu64 (__m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) { + return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_min_epu64 (__m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) { + 
return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +#define _mm_roundscale_pd(A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1); }) + + +#define _mm_mask_roundscale_pd(W, U, A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)); }) + + +#define _mm_maskz_roundscale_pd(U, A, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)); }) + + +#define _mm256_roundscale_pd(A, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1); }) + + +#define _mm256_mask_roundscale_pd(W, U, A, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)); }) + + +#define _mm256_maskz_roundscale_pd(U, A, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm_roundscale_ps(A, imm) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1); }) + + +#define _mm_mask_roundscale_ps(W, U, A, imm) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U)); }) + + +#define _mm_maskz_roundscale_ps(U, A, imm) __extension__ ({ \ + (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)); }) + +#define _mm256_roundscale_ps(A, imm) __extension__ ({ \ + (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1); }) + +#define _mm256_mask_roundscale_ps(W, U, A, imm) __extension__ ({ \ + (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U)); }) + + +#define _mm256_maskz_roundscale_ps(U, A, imm) __extension__ ({ \ + (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_scalef_pd (__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_scalef_pd (__m256d __A, __m256d __B) { + 
return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) { + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_scalef_ps (__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_scalef_ps (__m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +#define _mm_i64scatter_pd(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)); }) + +#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)); }) + +#define _mm_i64scatter_epi64(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)); }) + +#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)); }) + +#define _mm256_i64scatter_pd(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)); }) + +#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)); }) + +#define _mm256_i64scatter_epi64(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)-1, \ + 
(__v4di)(__m256i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)); }) + +#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)); }) + +#define _mm_i64scatter_ps(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)); }) + +#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)); }) + +#define _mm_i64scatter_epi32(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)); }) + +#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)); }) + +#define _mm256_i64scatter_ps(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)); }) + +#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)); }) + +#define _mm256_i64scatter_epi32(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)); }) + +#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)); }) + +#define _mm_i32scatter_pd(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)); }) + +#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)); }) + +#define _mm_i32scatter_epi64(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)); }) + +#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)); }) + +#define _mm256_i32scatter_pd(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)); }) + +#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)); }) + +#define _mm256_i32scatter_epi64(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + 
(__v4di)(__m256i)(v1), (int)(scale)); }) + +#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)); }) + +#define _mm_i32scatter_ps(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)); }) + +#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)); }) + +#define _mm_i32scatter_epi32(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)); }) + +#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)); }) + +#define _mm256_i32scatter_ps(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \ + (int)(scale)); }) + +#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \ + (int)(scale)); }) + +#define _mm256_i32scatter_epi32(addr, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)); }) + +#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \ + __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sqrt_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sqrt_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sqrt_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sqrt_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sqrt_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sqrt_ps(__A), + (__v4sf)_mm_setzero_pd()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sqrt_ps(__A), + (__v8sf)__W); +} + +static 
__inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sqrt_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sub_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sub_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sub_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sub_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sub_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sub_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sub_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sub_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask2_permutex2var_epi32 (__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) { + return (__m128i) __builtin_ia32_vpermi2vard128_mask ((__v4si) __A, + (__v4si) __I + /* idx */ , + (__v4si) __B, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask2_permutex2var_epi32 (__m256i __A, __m256i __I, + __mmask8 __U, __m256i __B) { + return (__m256i) __builtin_ia32_vpermi2vard256_mask ((__v8si) __A, + (__v8si) __I + /* idx */ , + (__v8si) __B, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask2_permutex2var_pd (__m128d __A, __m128i __I, __mmask8 __U, + __m128d __B) { + return (__m128d) __builtin_ia32_vpermi2varpd128_mask ((__v2df) __A, + (__v2di) __I + /* idx */ , + (__v2df) __B, + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask2_permutex2var_pd (__m256d __A, __m256i __I, __mmask8 __U, + __m256d __B) { + return (__m256d) __builtin_ia32_vpermi2varpd256_mask ((__v4df) __A, + (__v4di) __I + /* idx */ , + (__v4df) __B, + (__mmask8) + __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask2_permutex2var_ps (__m128 __A, __m128i __I, __mmask8 __U, + __m128 __B) { + return (__m128) __builtin_ia32_vpermi2varps128_mask ((__v4sf) __A, + (__v4si) __I + /* idx */ , + (__v4sf) __B, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask2_permutex2var_ps (__m256 __A, __m256i __I, 
__mmask8 __U, + __m256 __B) { + return (__m256) __builtin_ia32_vpermi2varps256_mask ((__v8sf) __A, + (__v8si) __I + /* idx */ , + (__v8sf) __B, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask2_permutex2var_epi64 (__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) { + return (__m128i) __builtin_ia32_vpermi2varq128_mask ((__v2di) __A, + (__v2di) __I + /* idx */ , + (__v2di) __B, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask2_permutex2var_epi64 (__m256i __A, __m256i __I, + __mmask8 __U, __m256i __B) { + return (__m256i) __builtin_ia32_vpermi2varq256_mask ((__v4di) __A, + (__v4di) __I + /* idx */ , + (__v4di) __B, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_permutex2var_epi32 (__m128i __A, __m128i __I, __m128i __B) { + return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I + /* idx */ , + (__v4si) __A, + (__v4si) __B, + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_permutex2var_epi32 (__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) { + return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I + /* idx */ , + (__v4si) __A, + (__v4si) __B, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_permutex2var_epi32 (__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) { + return (__m128i) __builtin_ia32_vpermt2vard128_maskz ((__v4si) __I + /* idx */ , + (__v4si) __A, + (__v4si) __B, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_permutex2var_epi32 (__m256i __A, __m256i __I, __m256i __B) { + return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I + /* idx */ , + (__v8si) __A, + (__v8si) __B, + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_permutex2var_epi32 (__m256i __A, __mmask8 __U, __m256i __I, + __m256i __B) { + return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I + /* idx */ , + (__v8si) __A, + (__v8si) __B, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_permutex2var_epi32 (__mmask8 __U, __m256i __A, + __m256i __I, __m256i __B) { + return (__m256i) __builtin_ia32_vpermt2vard256_maskz ((__v8si) __I + /* idx */ , + (__v8si) __A, + (__v8si) __B, + (__mmask8) + __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_permutex2var_pd (__m128d __A, __m128i __I, __m128d __B) { + return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I + /* idx */ , + (__v2df) __A, + (__v2df) __B, + (__mmask8) - + 1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_permutex2var_pd (__m128d __A, __mmask8 __U, __m128i __I, + __m128d __B) { + return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I + /* idx */ , + (__v2df) __A, + (__v2df) __B, + (__mmask8) + __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_permutex2var_pd (__mmask8 __U, __m128d __A, __m128i __I, + __m128d __B) { + return (__m128d) __builtin_ia32_vpermt2varpd128_maskz ((__v2di) __I + /* idx */ , + (__v2df) __A, + (__v2df) __B, + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_permutex2var_pd (__m256d __A, __m256i __I, __m256d __B) { + return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I + /* idx */ , + (__v4df) __A, + (__v4df) __B, + (__mmask8) - + 1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_permutex2var_pd (__m256d __A, __mmask8 __U, __m256i __I, + __m256d __B) { + return (__m256d) __builtin_ia32_vpermt2varpd256_mask 
((__v4di) __I + /* idx */ , + (__v4df) __A, + (__v4df) __B, + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_permutex2var_pd (__mmask8 __U, __m256d __A, __m256i __I, + __m256d __B) { + return (__m256d) __builtin_ia32_vpermt2varpd256_maskz ((__v4di) __I + /* idx */ , + (__v4df) __A, + (__v4df) __B, + (__mmask8) + __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_permutex2var_ps (__m128 __A, __m128i __I, __m128 __B) { + return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I + /* idx */ , + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_permutex2var_ps (__m128 __A, __mmask8 __U, __m128i __I, + __m128 __B) { + return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I + /* idx */ , + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_permutex2var_ps (__mmask8 __U, __m128 __A, __m128i __I, + __m128 __B) { + return (__m128) __builtin_ia32_vpermt2varps128_maskz ((__v4si) __I + /* idx */ , + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) + __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_permutex2var_ps (__m256 __A, __m256i __I, __m256 __B) { + return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I + /* idx */ , + (__v8sf) __A, + (__v8sf) __B, + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_permutex2var_ps (__m256 __A, __mmask8 __U, __m256i __I, + __m256 __B) { + return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I + /* idx */ , + (__v8sf) __A, + (__v8sf) __B, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_permutex2var_ps (__mmask8 __U, __m256 __A, __m256i __I, + __m256 __B) { + return (__m256) __builtin_ia32_vpermt2varps256_maskz ((__v8si) __I + /* idx */ , + (__v8sf) __A, + (__v8sf) __B, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_permutex2var_epi64 (__m128i __A, __m128i __I, __m128i __B) { + return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I + /* idx */ , + (__v2di) __A, + (__v2di) __B, + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_permutex2var_epi64 (__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) { + return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I + /* idx */ , + (__v2di) __A, + (__v2di) __B, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_permutex2var_epi64 (__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) { + return (__m128i) __builtin_ia32_vpermt2varq128_maskz ((__v2di) __I + /* idx */ , + (__v2di) __A, + (__v2di) __B, + (__mmask8) + __U); +} + + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_permutex2var_epi64 (__m256i __A, __m256i __I, __m256i __B) { + return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I + /* idx */ , + (__v4di) __A, + (__v4di) __B, + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_permutex2var_epi64 (__m256i __A, __mmask8 __U, __m256i __I, + __m256i __B) { + return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I + /* idx */ , + (__v4di) __A, + (__v4di) __B, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A, + __m256i __I, __m256i __B) { + return (__m256i) __builtin_ia32_vpermt2varq256_maskz ((__v4di) __I + /* idx */ , + (__v4di) __A, + (__v4di) __B, + (__mmask8) + __U); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS +_mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi8_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi8_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi8_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi8_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi8_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi8_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi8_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi8_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi32_epi64(__X), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi32_epi64(__X), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi32_epi64(__X), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi32_epi64(__X), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi16_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi16_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi16_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS 
+_mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi16_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi16_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi16_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi16_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi16_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu8_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu8_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu8_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu8_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu8_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu8_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu8_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu8_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu32_epi64(__X), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu32_epi64(__X), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS 
+_mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu32_epi64(__X), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu32_epi64(__X), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu16_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu16_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu16_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu16_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu16_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu16_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu16_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu16_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + + +#define _mm_rol_epi32(a, b) __extension__ ({\ + (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)-1); }) + +#define _mm_mask_rol_epi32(w, u, a, b) __extension__ ({\ + (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \ + (__v4si)(__m128i)(w), (__mmask8)(u)); }) + +#define _mm_maskz_rol_epi32(u, a, b) __extension__ ({\ + (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)(u)); }) + +#define _mm256_rol_epi32(a, b) __extension__ ({\ + (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1); }) + +#define _mm256_mask_rol_epi32(w, u, a, b) __extension__ ({\ + (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \ + (__v8si)(__m256i)(w), (__mmask8)(u)); }) + +#define _mm256_maskz_rol_epi32(u, a, b) __extension__ ({\ + (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(u)); }) + +#define _mm_rol_epi64(a, b) __extension__ ({\ + (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \ + 
(__v2di)_mm_setzero_di(), \ + (__mmask8)-1); }) + +#define _mm_mask_rol_epi64(w, u, a, b) __extension__ ({\ + (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \ + (__v2di)(__m128i)(w), (__mmask8)(u)); }) + +#define _mm_maskz_rol_epi64(u, a, b) __extension__ ({\ + (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \ + (__v2di)_mm_setzero_di(), \ + (__mmask8)(u)); }) + +#define _mm256_rol_epi64(a, b) __extension__ ({\ + (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \ + (__v4di)_mm256_setzero_si256(), \ + (__mmask8)-1); }) + +#define _mm256_mask_rol_epi64(w, u, a, b) __extension__ ({\ + (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \ + (__v4di)(__m256i)(w), (__mmask8)(u)); }) + +#define _mm256_maskz_rol_epi64(u, a, b) __extension__ ({\ + (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \ + (__v4di)_mm256_setzero_si256(), \ + (__mmask8)(u)); }) + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rolv_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_rolv_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rolv_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_di (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_di (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_rolv_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) 
__builtin_ia32_prolvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +#define _mm_ror_epi32(A, B) __extension__ ({ \ + (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)-1); }) + +#define _mm_mask_ror_epi32(W, U, A, B) __extension__ ({ \ + (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)(__m128i)(W), (__mmask8)(U)); }) + +#define _mm_maskz_ror_epi32(U, A, B) __extension__ ({ \ + (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)(U)); }) + +#define _mm256_ror_epi32(A, B) __extension__ ({ \ + (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1); }) + +#define _mm256_mask_ror_epi32(W, U, A, B) __extension__ ({ \ + (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)(__m256i)(W), (__mmask8)(U)); }) + +#define _mm256_maskz_ror_epi32(U, A, B) __extension__ ({ \ + (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U)); }) + +#define _mm_ror_epi64(A, B) __extension__ ({ \ + (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)_mm_setzero_di(), \ + (__mmask8)-1); }) + +#define _mm_mask_ror_epi64(W, U, A, B) __extension__ ({ \ + (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)(__m128i)(W), (__mmask8)(U)); }) + +#define _mm_maskz_ror_epi64(U, A, B) __extension__ ({ \ + (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)_mm_setzero_di(), \ + (__mmask8)(U)); }) + +#define _mm256_ror_epi64(A, B) __extension__ ({ \ + (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)_mm256_setzero_si256(), \ + (__mmask8)-1); }) + +#define _mm256_mask_ror_epi64(W, U, A, B) __extension__ ({ \ + (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)(__m256i)(W), (__mmask8)(U)); }) + +#define _mm256_maskz_ror_epi64(U, A, B) __extension__ ({ \ + (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)_mm256_setzero_si256(), \ + (__mmask8)(U)); }) + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sll_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sll_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sll_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sll_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS +_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_slli_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_slli_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_slli_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_slli_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sll_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sll_epi64(__A, __B), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sll_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sll_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_slli_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_slli_epi64(__A, __B), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_slli_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_slli_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rorv_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) 
+ _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_rorv_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rorv_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_di (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_di (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_rorv_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sllv_epi64(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sllv_epi64(__X, __Y), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sllv_epi64(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sllv_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sllv_epi32(__X, __Y), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sllv_epi32(__mmask8 __U, 
__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sllv_epi32(__X, __Y), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sllv_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sllv_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srlv_epi64(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srlv_epi64(__X, __Y), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srlv_epi64(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srlv_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srlv_epi32(__X, __Y), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srlv_epi32(__X, __Y), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srlv_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srlv_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srl_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srl_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srl_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srl_epi32(__A, 
__B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srli_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srli_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srli_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srli_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srl_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srl_epi64(__A, __B), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srl_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srl_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srli_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srli_epi64(__A, __B), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srli_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srli_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srav_epi32(__X, __Y), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srav_epi32(__X, __Y), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i 
__Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srav_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srav_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srav_epi64(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srav_epi64(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srav_epi64(__X, __Y), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_srav_epi64(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di) __Y); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srav_epi64(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srav_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U, + (__v4si) __A, + (__v4si) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U, + (__v4si) __A, + (__v4si) _mm_setzero_si128 ()); +} + + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U, + (__v8si) __A, + (__v8si) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U, + (__v8si) __A, + (__v8si) _mm256_setzero_si256 ()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P, + (__v4si) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_load_epi32 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P, + (__v8si) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS 
+_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_movdqa32store128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_movdqa32store256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U, + (__v2di) __A, + (__v2di) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U, + (__v2di) __A, + (__v2di) _mm_setzero_di ()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U, + (__v4di) __A, + (__v4di) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U, + (__v4di) __A, + (__v4di) _mm256_setzero_si256 ()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P, + (__v2di) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P, + (__v2di) + _mm_setzero_di (), + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P, + (__v4di) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_movdqa64store128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_movdqa64store256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_movedup_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_movedup_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_movedup_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_movedup_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + + +#define _mm_mask_set1_epi32(O, M, A) __extension__ ({ \ + 
(__m128i)__builtin_ia32_pbroadcastd128_gpr_mask((int)(A), \ + (__v4si)(__m128i)(O), \ + (__mmask8)(M)); }) + +#define _mm_maskz_set1_epi32(M, A) __extension__ ({ \ + (__m128i)__builtin_ia32_pbroadcastd128_gpr_mask((int)(A), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)(M)); }) + +#define _mm256_mask_set1_epi32(O, M, A) __extension__ ({ \ + (__m256i)__builtin_ia32_pbroadcastd256_gpr_mask((int)(A), \ + (__v8si)(__m256i)(O), \ + (__mmask8)(M)); }) + +#define _mm256_maskz_set1_epi32(M, A) __extension__ ({ \ + (__m256i)__builtin_ia32_pbroadcastd256_gpr_mask((int)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(M)); }) + +#ifdef __x86_64__ +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A) +{ + return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A, (__v2di) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_set1_epi64 (__mmask8 __M, long long __A) +{ + return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A) +{ + return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A, (__v4di) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A) +{ + return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A, + (__v4di) + _mm256_setzero_si256 (), + __M); +} +#endif + +#define _mm_fixupimm_pd(A, B, C, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \ + (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), \ + (int)(imm), (__mmask8)(U)); }) + +#define _mm256_fixupimm_pd(A, B, C, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (__v4di)(__m256i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (__v4di)(__m256i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (__v4di)(__m256i)(C), \ + (int)(imm), (__mmask8)(U)); }) + +#define _mm_fixupimm_ps(A, B, C, imm) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \ + (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_fixupimm_ps(A, B, C, imm) __extension__ ({ \ + 
(__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \ + (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \ + (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_load_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_load_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS +_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_loadu_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storeapd128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A) +{ + __builtin_ia32_storeapd256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storeaps128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A) +{ + __builtin_ia32_storeaps256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedqudi128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A) +{ + 
__builtin_ia32_storedqudi256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedqusi128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_storedqusi256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storeupd128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A) +{ + __builtin_ia32_storeupd256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storeups128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A) +{ + __builtin_ia32_storeups256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_unpackhi_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_unpackhi_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_unpackhi_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_unpackhi_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_unpackhi_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_unpackhi_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_unpackhi_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_unpackhi_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_unpacklo_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS 
+_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_unpacklo_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_unpacklo_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_unpacklo_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_unpacklo_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_unpacklo_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_unpacklo_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_unpacklo_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_rcp14_pd (__m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_rcp14_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rcp14_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) 
__U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_rcp14_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +#define _mm_mask_permute_pd(W, U, X, C) __extension__ ({ \ + (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm_permute_pd((X), (C)), \ + (__v2df)(__m128d)(W)); }) + +#define _mm_maskz_permute_pd(U, X, C) __extension__ ({ \ + (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm_permute_pd((X), (C)), \ + (__v2df)_mm_setzero_pd()); }) + +#define _mm256_mask_permute_pd(W, U, X, C) __extension__ ({ \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permute_pd((X), (C)), \ + (__v4df)(__m256d)(W)); }) + +#define _mm256_maskz_permute_pd(U, X, C) __extension__ ({ \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permute_pd((X), (C)), \ + (__v4df)_mm256_setzero_pd()); }) + +#define _mm_mask_permute_ps(W, U, X, C) __extension__ ({ \ + (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm_permute_ps((X), (C)), \ + (__v4sf)(__m128)(W)); }) + +#define _mm_maskz_permute_ps(U, X, C) __extension__ ({ \ + (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm_permute_ps((X), (C)), \ + (__v4sf)_mm_setzero_ps()); }) + +#define _mm256_mask_permute_ps(W, U, X, C) __extension__ ({ \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_permute_ps((X), (C)), \ + (__v8sf)(__m256)(W)); }) + +#define _mm256_maskz_permute_ps(U, X, C) __extension__ ({ \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_permute_ps((X), (C)), \ + (__v8sf)_mm256_setzero_ps()); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_permutevar_pd(__A, __C), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_permutevar_pd(__A, __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_permutevar_pd(__A, __C), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_permutevar_pd(__A, __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_permutevar_ps(__A, __C), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C) +{ + return 
(__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_permutevar_ps(__A, __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_permutevar_ps(__A, __C), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_permutevar_ps(__A, __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_test_epi32_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A, + (__v4si) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A, + (__v4si) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_test_epi32_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A, + (__v8si) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A, + (__v8si) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_test_epi64_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A, + (__v2di) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A, + (__v2di) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_test_epi64_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A, + (__v4di) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A, + (__v4di) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_testn_epi32_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A, + (__v4si) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A, + (__v4si) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_testn_epi32_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A, + (__v8si) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A, + (__v8si) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_testn_epi64_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A, + (__v2di) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A, + (__v2di) __B, __U); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_testn_epi64_mask 
(__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A, + (__v4di) __B, + (__mmask8) -1); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A, + (__v4di) __B, __U); +} + + + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_unpackhi_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_unpackhi_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_unpackhi_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_unpackhi_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_unpackhi_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_unpackhi_epi64(__A, __B), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_unpackhi_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_unpackhi_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_unpacklo_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_unpacklo_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_unpacklo_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_unpacklo_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return 
(__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_unpacklo_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_unpacklo_epi64(__A, __B), + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_unpacklo_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_unpacklo_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sra_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sra_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sra_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sra_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srai_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srai_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srai_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srai_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sra_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ + (__v2di)_mm_sra_epi64(__A, __B), \ + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ + (__v2di)_mm_sra_epi64(__A, __B), \ + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_sra_epi64(__m256i __A, __m128i 
__B) +{ + return (__m256i)__builtin_ia32_psraq256((__v4di) __A, (__v2di) __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm256_sra_epi64(__A, __B), \ + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm256_sra_epi64(__A, __B), \ + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_srai_epi64(__m128i __A, int __imm) +{ + return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ + (__v2di)_mm_srai_epi64(__A, __imm), \ + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ + (__v2di)_mm_srai_epi64(__A, __imm), \ + (__v2di)_mm_setzero_di()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_srai_epi64(__m256i __A, int __imm) +{ + return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm256_srai_epi64(__A, __imm), \ + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm256_srai_epi64(__A, __imm), \ + (__v4di)_mm256_setzero_si256()); +} + +#define _mm_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ 
\ + (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), \ + (__v4di)(__m256i)(C), (int)(imm), \ + (__mmask8)-1); }) + +#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), \ + (__v4di)(__m256i)(C), (int)(imm), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), \ + (__v4di)(__m256i)(C), (int)(imm), \ + (__mmask8)(U)); }) + + + +#define _mm256_shuffle_f32x4(A, B, imm) __extension__ ({ \ + (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1); }) + +#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \ + (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(imm), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \ + (__m256)__builtin_ia32_shuf_f32x4_256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)); }) + +#define _mm256_shuffle_f64x2(A, B, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1); }) + +#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (int)(imm), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \ + (__m256d)__builtin_ia32_shuf_f64x2_256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm256_shuffle_i32x4(A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), \ + (int)(imm), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1); }) + +#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), \ + (int)(imm), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_shuf_i32x4_256_mask((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), \ + (int)(imm), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U)); }) + +#define _mm256_shuffle_i64x2(A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), \ + (int)(imm), \ + (__v4di)_mm256_setzero_si256(), \ + (__mmask8)-1); }) + +#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), \ + 
(int)(imm), \ + (__v4di)(__m256i)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_shuf_i64x2_256_mask((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), \ + (int)(imm), \ + (__v4di)_mm256_setzero_si256(), \ + (__mmask8)(U)); }) + +#define _mm_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \ + (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm_shuffle_pd((A), (B), (M)), \ + (__v2df)(__m128d)(W)); }) + +#define _mm_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \ + (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm_shuffle_pd((A), (B), (M)), \ + (__v2df)_mm_setzero_pd()); }) + +#define _mm256_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_shuffle_pd((A), (B), (M)), \ + (__v4df)(__m256d)(W)); }) + +#define _mm256_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_shuffle_pd((A), (B), (M)), \ + (__v4df)_mm256_setzero_pd()); }) + +#define _mm_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \ + (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm_shuffle_ps((A), (B), (M)), \ + (__v4sf)(__m128)(W)); }) + +#define _mm_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \ + (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm_shuffle_ps((A), (B), (M)), \ + (__v4sf)_mm_setzero_ps()); }) + +#define _mm256_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \ + (__v8sf)(__m256)(W)); }) + +#define _mm256_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \ + (__v8sf)_mm256_setzero_ps()); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_rsqrt14_pd (__m128d __A) +{ + return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_rsqrt14_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rsqrt14_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) 
__W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_rsqrt14_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_broadcast_f32x4 (__m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A, + (__v8sf)_mm256_undefined_pd (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_broadcast_f32x4 (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A, + (__v8sf) __O, + __M); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A, + (__v8sf) _mm256_setzero_ps (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_broadcast_i32x4 (__m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A, + (__v8si)_mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_broadcast_i32x4 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) __A, + (__v8si) + __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_broadcast_i32x4 (__mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) + __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256(__M, + (__v4df) _mm256_broadcastsd_pd(__A), + (__v4df) __O); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256(__M, + (__v4df) _mm256_broadcastsd_pd(__A), + (__v4df) _mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128(__M, + (__v4sf) _mm_broadcastss_ps(__A), + (__v4sf) __O); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128(__M, + (__v4sf) _mm_broadcastss_ps(__A), + (__v4sf) _mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256(__M, + (__v8sf) _mm256_broadcastss_ps(__A), + (__v8sf) __O); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256(__M, + (__v8sf) _mm256_broadcastss_ps(__A), + 
(__v8sf) _mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128(__M, + (__v4si) _mm_broadcastd_epi32(__A), + (__v4si) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128(__M, + (__v4si) _mm_broadcastd_epi32(__A), + (__v4si) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256(__M, + (__v8si) _mm256_broadcastd_epi32(__A), + (__v8si) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256(__M, + (__v8si) _mm256_broadcastd_epi32(__A), + (__v8si) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di) _mm_broadcastq_epi64(__A), + (__v2di) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di) _mm_broadcastq_epi64(__A), + (__v2di) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di) _mm256_broadcastq_epi64(__A), + (__v4di) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di) _mm256_broadcastq_epi64(__A), + (__v4di) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtsepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtsepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M); 
+} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtsepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi)_mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtsepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtsepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtsepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtsepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ 
__m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtsepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtsepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtsepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtusepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS +_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtusepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtusepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtusepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) _mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtusepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + 
+static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtusepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtusepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtusepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtusepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} 
+ +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtusepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + return __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi)_mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS +_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) _mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) _mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A, + (__v4si) _mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi64_epi32 
(__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cvtepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi) _mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_cvtepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +#define _mm256_extractf32x4_ps(A, imm) __extension__ ({ \ + (__m128)__builtin_shufflevector((__v8sf)(__m256)(A), \ + (__v8sf)_mm256_undefined_ps(), \ + ((imm) & 1) ? 4 : 0, \ + ((imm) & 1) ? 5 : 1, \ + ((imm) & 1) ? 6 : 2, \ + ((imm) & 1) ? 7 : 3); }) + +#define _mm256_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({ \ + (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm256_extractf32x4_ps((A), (imm)), \ + (__v4sf)(W)); }) + +#define _mm256_maskz_extractf32x4_ps(U, A, imm) __extension__ ({ \ + (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm256_extractf32x4_ps((A), (imm)), \ + (__v4sf)_mm_setzero_ps()); }) + +#define _mm256_extracti32x4_epi32(A, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector((__v8si)(__m256)(A), \ + (__v8si)_mm256_undefined_si256(), \ + ((imm) & 1) ? 4 : 0, \ + ((imm) & 1) ? 5 : 1, \ + ((imm) & 1) ? 6 : 2, \ + ((imm) & 1) ? 7 : 3); }) + +#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm256_extracti32x4_epi32((A), (imm)), \ + (__v4si)(W)); }) + +#define _mm256_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm256_extracti32x4_epi32((A), (imm)), \ + (__v4si)_mm_setzero_si128()); }) + +#define _mm256_insertf32x4(A, B, imm) __extension__ ({ \ + (__m256)__builtin_shufflevector((__v8sf)(A), \ + (__v8sf)_mm256_castps128_ps256((__m128)(B)), \ + ((imm) & 0x1) ? 0 : 8, \ + ((imm) & 0x1) ? 1 : 9, \ + ((imm) & 0x1) ? 
2 : 10, \ + ((imm) & 0x1) ? 3 : 11, \ + ((imm) & 0x1) ? 8 : 4, \ + ((imm) & 0x1) ? 9 : 5, \ + ((imm) & 0x1) ? 10 : 6, \ + ((imm) & 0x1) ? 11 : 7); }) + +#define _mm256_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \ + (__v8sf)(W)); }) + +#define _mm256_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \ + (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \ + (__v8sf)_mm256_setzero_ps()); }) + +#define _mm256_inserti32x4(A, B, imm) __extension__ ({ \ + (__m256i)__builtin_shufflevector((__v8si)(A), \ + (__v8si)_mm256_castsi128_si256((__m128i)(B)), \ + ((imm) & 0x1) ? 0 : 8, \ + ((imm) & 0x1) ? 1 : 9, \ + ((imm) & 0x1) ? 2 : 10, \ + ((imm) & 0x1) ? 3 : 11, \ + ((imm) & 0x1) ? 8 : 4, \ + ((imm) & 0x1) ? 9 : 5, \ + ((imm) & 0x1) ? 10 : 6, \ + ((imm) & 0x1) ? 11 : 7); }) + +#define _mm256_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_inserti32x4((A), (B), (imm)), \ + (__v8si)(W)); }) + +#define _mm256_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_inserti32x4((A), (B), (imm)), \ + (__v8si)_mm256_setzero_si256()); }) + +#define _mm_getmant_pd(A, B, C) __extension__({\ + (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1); }) + +#define _mm_mask_getmant_pd(W, U, A, B, C) __extension__({\ + (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U)); }) + +#define _mm_maskz_getmant_pd(U, A, B, C) __extension__({\ + (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm256_getmant_pd(A, B, C) __extension__ ({ \ + (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1); }) + +#define _mm256_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \ + (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_getmant_pd(U, A, B, C) __extension__ ({ \ + (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U)); }) + +#define _mm_getmant_ps(A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1); }) + +#define _mm_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U)); }) + +#define _mm_maskz_getmant_ps(U, A, B, C) __extension__ ({ \ + (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U)); }) + +#define _mm256_getmant_ps(A, B, C) __extension__ ({ \ + (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1); }) + +#define _mm256_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \ + (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + 
(__v8sf)(__m256)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_getmant_ps(U, A, B, C) __extension__ ({ \ + (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U)); }) + +#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \ + (double const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \ + (long long const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \ + (double const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \ + (long long const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \ + (float const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \ + (int const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \ + (float const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \ + (int const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \ + (double const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \ + (long long const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \ + (double const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \ + (long long const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \ + (float const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + 
+#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \ + (int const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \ + (float const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\ + (__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \ + (int const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale)); }) + +#define _mm256_permutex_pd(X, C) __extension__ ({ \ + (__m256d)__builtin_shufflevector((__v4df)(__m256d)(X), \ + (__v4df)_mm256_undefined_pd(), \ + ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \ + ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); }) + +#define _mm256_mask_permutex_pd(W, U, X, C) __extension__ ({ \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permutex_pd((X), (C)), \ + (__v4df)(__m256d)(W)); }) + +#define _mm256_maskz_permutex_pd(U, X, C) __extension__ ({ \ + (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permutex_pd((X), (C)), \ + (__v4df)_mm256_setzero_pd()); }) + +#define _mm256_permutex_epi64(X, C) __extension__ ({ \ + (__m256i)__builtin_shufflevector((__v4di)(__m256i)(X), \ + (__v4di)_mm256_undefined_si256(), \ + ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \ + ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); }) + +#define _mm256_mask_permutex_epi64(W, U, X, C) __extension__ ({ \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_permutex_epi64((X), (C)), \ + (__v4di)(__m256i)(W)); }) + +#define _mm256_maskz_permutex_epi64(U, X, C) __extension__ ({ \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_permutex_epi64((X), (C)), \ + (__v4di)_mm256_setzero_si256()); }) + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_permutexvar_pd (__m256i __X, __m256d __Y) +{ + return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y, + (__v4di) __X, + (__v4df) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X, + __m256d __Y) +{ + return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y, + (__v4di) __X, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y) +{ + return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y, + (__v4di) __X, + (__v4df) _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y, + (__v4di) __X, + (__v4di) _mm256_setzero_si256 (), + (__mmask8) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_permutexvar_epi64 ( __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y, + (__v4di) __X, + (__v4di) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y, + (__v4di) __X, + (__v4di) __W, + __M); +} + +static __inline__ 
__m256 __DEFAULT_FN_ATTRS +_mm256_mask_permutexvar_ps (__m256 __W, __mmask8 __U, __m256i __X, + __m256 __Y) +{ + return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y, + (__v8si) __X, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_permutexvar_ps (__mmask8 __U, __m256i __X, __m256 __Y) +{ + return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y, + (__v8si) __X, + (__v8sf) _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_permutexvar_ps (__m256i __X, __m256 __Y) +{ + return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y, + (__v8si) __X, + (__v8sf) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_permutexvar_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y, + (__v8si) __X, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_permutexvar_epi32 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y, + (__v8si) __X, + (__v8si) __W, + (__mmask8) __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_permutexvar_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y, + (__v8si) __X, + (__v8si) _mm256_undefined_si256(), + (__mmask8) -1); +} + +#define _mm_alignr_epi32(A, B, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector((__v4si)(__m128i)(B), \ + (__v4si)(__m128i)(A), \ + ((int)(imm) & 0x3) + 0, \ + ((int)(imm) & 0x3) + 1, \ + ((int)(imm) & 0x3) + 2, \ + ((int)(imm) & 0x3) + 3); }) + +#define _mm_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_alignr_epi32((A), (B), (imm)), \ + (__v4si)(__m128i)(W)); }) + +#define _mm_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_alignr_epi32((A), (B), (imm)), \ + (__v4si)_mm_setzero_si128()); }) + +#define _mm256_alignr_epi32(A, B, imm) __extension__ ({ \ + (__m256i)__builtin_shufflevector((__v8si)(__m256i)(B), \ + (__v8si)(__m256i)(A), \ + ((int)(imm) & 0x7) + 0, \ + ((int)(imm) & 0x7) + 1, \ + ((int)(imm) & 0x7) + 2, \ + ((int)(imm) & 0x7) + 3, \ + ((int)(imm) & 0x7) + 4, \ + ((int)(imm) & 0x7) + 5, \ + ((int)(imm) & 0x7) + 6, \ + ((int)(imm) & 0x7) + 7); }) + +#define _mm256_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \ + (__v8si)(__m256i)(W)); }) + +#define _mm256_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \ + (__v8si)_mm256_setzero_si256()); }) + +#define _mm_alignr_epi64(A, B, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector((__v2di)(__m128i)(B), \ + (__v2di)(__m128i)(A), \ + ((int)(imm) & 0x1) + 0, \ + ((int)(imm) & 0x1) + 1); }) + +#define _mm_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_alignr_epi64((A), (B), (imm)), \ + (__v2di)(__m128i)(W)); }) + +#define _mm_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_alignr_epi64((A), (B), (imm)), \ + (__v2di)_mm_setzero_di()); }) + +#define 
_mm256_alignr_epi64(A, B, imm) __extension__ ({ \ + (__m256i)__builtin_shufflevector((__v4di)(__m256i)(B), \ + (__v4di)(__m256i)(A), \ + ((int)(imm) & 0x3) + 0, \ + ((int)(imm) & 0x3) + 1, \ + ((int)(imm) & 0x3) + 2, \ + ((int)(imm) & 0x3) + 3); }) + +#define _mm256_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \ + (__v4di)(__m256i)(W)); }) + +#define _mm256_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \ + (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \ + (__v4di)_mm256_setzero_si256()); }) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_movehdup_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_movehdup_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_movehdup_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_movehdup_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_moveldup_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_moveldup_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_moveldup_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_moveldup_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +#define _mm256_mask_shuffle_epi32(W, U, A, I) __extension__({\ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shuffle_epi32((A), (I)), \ + (__v8si)(__m256i)(W)); }) + +#define _mm256_maskz_shuffle_epi32(U, A, I) __extension__({\ + (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shuffle_epi32((A), (I)), \ + (__v8si)_mm256_setzero_si256()); }) + +#define _mm_mask_shuffle_epi32(W, U, A, I) __extension__({\ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shuffle_epi32((A), (I)), \ + (__v4si)(__m128i)(W)); }) + +#define _mm_maskz_shuffle_epi32(U, A, I) __extension__({\ + (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shuffle_epi32((A), (I)), \ + (__v4si)_mm_setzero_si128()); }) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __A, + (__v2df) __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_mov_pd (__mmask8 __U, __m128d 
__A) +{ + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __A, + (__v2df) _mm_setzero_pd ()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __A, + (__v4df) __W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __A, + (__v4df) _mm256_setzero_pd ()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, + (__v4sf) __A, + (__v4sf) __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_mov_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, + (__v4sf) __A, + (__v4sf) _mm_setzero_ps ()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __A, + (__v8sf) __W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __A, + (__v8sf) _mm256_setzero_ps ()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline __m128i __DEFAULT_FN_ATTRS +_mm_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline __m128i __DEFAULT_FN_ATTRS +_mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION, + (__v8hi) _mm_setzero_si128 (), + (__mmask8) __U); +} + +#define _mm_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \ + (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U)); }) + +#define _mm_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \ + (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \ + (__v8hi)_mm_setzero_si128(), \ + (__mmask8)(U)); }) + +static __inline __m128i __DEFAULT_FN_ATTRS +_mm256_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m256 __A) +{ + return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline __m128i __DEFAULT_FN_ATTRS +_mm256_maskz_cvtps_ph ( __mmask8 __U, __m256 __A) +{ + return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION, + (__v8hi) 
_mm_setzero_si128(), + (__mmask8) __U); +} +#define _mm256_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \ + (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U)); }) + +#define _mm256_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \ + (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \ + (__v8hi)_mm_setzero_si128(), \ + (__mmask8)(U)); }) + + #undef __DEFAULT_FN_ATTRS #endif /* __AVX512VLINTRIN_H */ diff --git a/c_headers/avxintrin.h b/c_headers/avxintrin.h index c1bc85b39..be03ba346 100644 --- a/c_headers/avxintrin.h +++ b/c_headers/avxintrin.h @@ -35,203 +35,721 @@ typedef int __v8si __attribute__ ((__vector_size__ (32))); typedef short __v16hi __attribute__ ((__vector_size__ (32))); typedef char __v32qi __attribute__ ((__vector_size__ (32))); +/* Unsigned types */ +typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32))); +typedef unsigned int __v8su __attribute__ ((__vector_size__ (32))); +typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32))); +typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32))); + +/* We need an explicitly signed variant for char. Note that this shouldn't + * appear in the interface though. */ +typedef signed char __v32qs __attribute__((__vector_size__(32))); + typedef float __m256 __attribute__ ((__vector_size__ (32))); typedef double __m256d __attribute__((__vector_size__(32))); typedef long long __m256i __attribute__((__vector_size__(32))); /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"))) /* Arithmetic */ +/// \brief Adds two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the sums of both +/// operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_add_pd(__m256d __a, __m256d __b) { - return __a+__b; + return (__m256d)((__v4df)__a+(__v4df)__b); } +/// \brief Adds two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the sums of both +/// operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_add_ps(__m256 __a, __m256 __b) { - return __a+__b; + return (__m256)((__v8sf)__a+(__v8sf)__b); } +/// \brief Subtracts two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the minuend. +/// \param __b +/// A 256-bit vector of [4 x double] containing the subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the differences between +/// both operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sub_pd(__m256d __a, __m256d __b) { - return __a-__b; + return (__m256d)((__v4df)__a-(__v4df)__b); } +/// \brief Subtracts two 256-bit vectors of [8 x float]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the minuend. +/// \param __b +/// A 256-bit vector of [8 x float] containing the subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the differences between +/// both operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sub_ps(__m256 __a, __m256 __b) { - return __a-__b; + return (__m256)((__v8sf)__a-(__v8sf)__b); } +/// \brief Adds the even-indexed values and subtracts the odd-indexed values of +/// two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the left source operand. +/// \param __b +/// A 256-bit vector of [4 x double] containing the right source operand. +/// \returns A 256-bit vector of [4 x double] containing the alternating sums +/// and differences between both operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_addsub_pd(__m256d __a, __m256d __b) { return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b); } +/// \brief Adds the even-indexed values and subtracts the odd-indexed values of +/// two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the left source operand. +/// \param __b +/// A 256-bit vector of [8 x float] containing the right source operand. +/// \returns A 256-bit vector of [8 x float] containing the alternating sums and +/// differences between both operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_addsub_ps(__m256 __a, __m256 __b) { return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b); } +/// \brief Divides two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the dividend. +/// \param __b +/// A 256-bit vector of [4 x double] containing the divisor. +/// \returns A 256-bit vector of [4 x double] containing the quotients of both +/// operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_div_pd(__m256d __a, __m256d __b) { - return __a / __b; + return (__m256d)((__v4df)__a/(__v4df)__b); } +/// \brief Divides two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the dividend. +/// \param __b +/// A 256-bit vector of [8 x float] containing the divisor. +/// \returns A 256-bit vector of [8 x float] containing the quotients of both +/// operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_div_ps(__m256 __a, __m256 __b) { - return __a / __b; + return (__m256)((__v8sf)__a/(__v8sf)__b); } +/// \brief Compares two 256-bit vectors of [4 x double] and returns the greater +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the maximum values +/// between both operands. 
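As a minimal usage sketch of the packed add/sub/addsub/div intrinsics documented above (assuming an AVX-capable target compiled with -mavx; the values and variable names are illustrative and not taken from the header):

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
    /* _mm256_set_pd takes its arguments from the highest element down. */
    __m256d a = _mm256_set_pd(4.0, 3.0, 2.0, 1.0);   /* lanes: {1, 2, 3, 4} */
    __m256d b = _mm256_set_pd(8.0, 6.0, 4.0, 2.0);   /* lanes: {2, 4, 6, 8} */

    __m256d sum  = _mm256_add_pd(a, b);      /* {3, 6, 9, 12} */
    __m256d diff = _mm256_sub_pd(b, a);      /* {1, 2, 3, 4}  */
    __m256d quot = _mm256_div_pd(b, a);      /* {2, 2, 2, 2}  */
    /* addsub subtracts in even-indexed lanes and adds in odd-indexed lanes. */
    __m256d mix  = _mm256_addsub_pd(a, b);   /* {-1, 6, -3, 12} */

    double out[4];
    _mm256_storeu_pd(out, sum);
    printf("sum = %g %g %g %g\n", out[0], out[1], out[2], out[3]);

    (void)diff; (void)quot; (void)mix;
    return 0;
}

The lane comments list element 0 first, matching the memory order produced by _mm256_storeu_pd.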
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_max_pd(__m256d __a, __m256d __b) { return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b); } +/// \brief Compares two 256-bit vectors of [8 x float] and returns the greater +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the maximum values +/// between both operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_max_ps(__m256 __a, __m256 __b) { return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b); } +/// \brief Compares two 256-bit vectors of [4 x double] and returns the lesser +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the minimum values +/// between both operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_min_pd(__m256d __a, __m256d __b) { return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b); } +/// \brief Compares two 256-bit vectors of [8 x float] and returns the lesser +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the minimum values +/// between both operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_min_ps(__m256 __a, __m256 __b) { return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b); } +/// \brief Multiplies two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the products of both +/// operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_mul_pd(__m256d __a, __m256d __b) { - return __a * __b; + return (__m256d)((__v4df)__a * (__v4df)__b); } +/// \brief Multiplies two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the products of both +/// operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_mul_ps(__m256 __a, __m256 __b) { - return __a * __b; + return (__m256)((__v8sf)__a * (__v8sf)__b); } +/// \brief Calculates the square roots of the values in a 256-bit vector of +/// [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the square roots of the +/// values in the operand. 
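The packed min/max pair above is the usual building block for per-lane clamping; a short sketch under the same assumptions (clamp_ps is an illustrative helper name, not part of this header):

#include <immintrin.h>

/* Clamp every lane of v into [lo, hi]: raise to lo with max, then cap at hi with min. */
static inline __m256 clamp_ps(__m256 v, float lo, float hi)
{
    return _mm256_min_ps(_mm256_max_ps(v, _mm256_set1_ps(lo)),
                         _mm256_set1_ps(hi));
}

Note that VMAXPS/VMINPS return their second source operand when either input is NaN, so with this argument order a NaN lane in v comes out as lo.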
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a) { return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a); } +/// \brief Calculates the square roots of the values in a 256-bit vector of +/// [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the square roots of the +/// values in the operand. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a) { return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a); } +/// \brief Calculates the reciprocal square roots of the values in a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the reciprocal square +/// roots of the values in the operand. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_rsqrt_ps(__m256 __a) { return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a); } +/// \brief Calculates the reciprocals of the values in a 256-bit vector of +/// [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the reciprocals of the +/// values in the operand. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_rcp_ps(__m256 __a) { return (__m256)__builtin_ia32_rcpps256((__v8sf)__a); } +/// \brief Rounds the values in a 256-bit vector of [4 x double] as specified +/// by the byte operand. The source values are rounded to integer values and +/// returned as 64-bit double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_round_pd(__m256d V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used. \n +/// 1: The PE field is not updated. \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M. \n +/// 1: Use the current MXCSR setting. \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest. \n +/// 01: Downward (toward negative infinity). \n +/// 10: Upward (toward positive infinity). \n +/// 11: Truncated. +/// \returns A 256-bit vector of [4 x double] containing the rounded values. #define _mm256_round_pd(V, M) __extension__ ({ \ - __m256d __V = (V); \ - (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); }) + (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); }) +/// \brief Rounds the values stored in a 256-bit vector of [8 x float] as +/// specified by the byte operand. The source values are rounded to integer +/// values and returned as floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_round_ps(__m256 V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used. \n +/// 1: The PE field is not updated. 
\n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M. \n +/// 1: Use the current MXCSR setting. \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest. \n +/// 01: Downward (toward negative infinity). \n +/// 10: Upward (toward positive infinity). \n +/// 11: Truncated. +/// \returns A 256-bit vector of [8 x float] containing the rounded values. #define _mm256_round_ps(V, M) __extension__ ({ \ - __m256 __V = (V); \ - (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); }) + (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); }) +/// \brief Rounds up the values stored in a 256-bit vector of [4 x double]. The +/// source values are rounded up to integer values and returned as 64-bit +/// double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_ceil_pd(__m256d V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the rounded up values. #define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL) + +/// \brief Rounds down the values stored in a 256-bit vector of [4 x double]. +/// The source values are rounded down to integer values and returned as +/// 64-bit double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_floor_pd(__m256d V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the rounded down +/// values. #define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR) + +/// \brief Rounds up the values stored in a 256-bit vector of [8 x float]. The +/// source values are rounded up to integer values and returned as +/// floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_ceil_ps(__m256 V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the rounded up values. #define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL) + +/// \brief Rounds down the values stored in a 256-bit vector of [8 x float]. The +/// source values are rounded down to integer values and returned as +/// floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_floor_ps(__m256 V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the rounded down values. #define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR) /* Logical */ +/// \brief Performs a bitwise AND of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the +/// values between both operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_and_pd(__m256d __a, __m256d __b) { - return (__m256d)((__v4di)__a & (__v4di)__b); + return (__m256d)((__v4du)__a & (__v4du)__b); } +/// \brief Performs a bitwise AND of two 256-bit vectors of [8 x float]. 
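A brief sketch of how the rounding macros above are typically invoked (round_examples is an illustrative name; the _MM_FROUND_* constants are provided via immintrin.h, and an AVX target is assumed):

#include <immintrin.h>

static inline void round_examples(__m256d v,
                                  __m256d *up, __m256d *down, __m256d *nearest)
{
    *up      = _mm256_ceil_pd(v);    /* _MM_FROUND_CEIL: round toward +infinity  */
    *down    = _mm256_floor_pd(v);   /* _MM_FROUND_FLOOR: round toward -infinity */
    /* Explicit rounding control: nearest, with bit [3] set so the precision
       exception field is not updated. */
    *nearest = _mm256_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}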
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the +/// values between both operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_and_ps(__m256 __a, __m256 __b) { - return (__m256)((__v8si)__a & (__v8si)__b); + return (__m256)((__v8su)__a & (__v8su)__b); } +/// \brief Performs a bitwise AND of two 256-bit vectors of [4 x double], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 256-bit vector of [4 x double] containing the right source operand. +/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the +/// values of the second operand and the one's complement of the first +/// operand. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_andnot_pd(__m256d __a, __m256d __b) { - return (__m256d)(~(__v4di)__a & (__v4di)__b); + return (__m256d)(~(__v4du)__a & (__v4du)__b); } +/// \brief Performs a bitwise AND of two 256-bit vectors of [8 x float], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 256-bit vector of [8 x float] containing the right source operand. +/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the +/// values of the second operand and the one's complement of the first +/// operand. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_andnot_ps(__m256 __a, __m256 __b) { - return (__m256)(~(__v8si)__a & (__v8si)__b); + return (__m256)(~(__v8su)__a & (__v8su)__b); } +/// \brief Performs a bitwise OR of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the +/// values between both operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_or_pd(__m256d __a, __m256d __b) { - return (__m256d)((__v4di)__a | (__v4di)__b); + return (__m256d)((__v4du)__a | (__v4du)__b); } +/// \brief Performs a bitwise OR of two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the +/// values between both operands. 
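The bitwise AND/ANDNOT/OR intrinsics above are commonly used for sign manipulation and mask-driven selection on floating-point lanes; a minimal sketch (abs_pd and select_pd are illustrative helper names, not part of this header; _mm256_blendv_pd further below is the single-instruction alternative to select_pd):

#include <immintrin.h>

/* |x| per lane: clear the sign bit by ANDNOT against -0.0 (only the sign bit set). */
static inline __m256d abs_pd(__m256d x)
{
    return _mm256_andnot_pd(_mm256_set1_pd(-0.0), x);
}

/* Per-lane select: (mask & a) | (~mask & b). Assumes mask lanes are all-ones
   or all-zeros, e.g. the result of a packed compare. */
static inline __m256d select_pd(__m256d mask, __m256d a, __m256d b)
{
    return _mm256_or_pd(_mm256_and_pd(mask, a),
                        _mm256_andnot_pd(mask, b));
}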
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_or_ps(__m256 __a, __m256 __b) { - return (__m256)((__v8si)__a | (__v8si)__b); + return (__m256)((__v8su)__a | (__v8su)__b); } +/// \brief Performs a bitwise XOR of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the +/// values between both operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_xor_pd(__m256d __a, __m256d __b) { - return (__m256d)((__v4di)__a ^ (__v4di)__b); + return (__m256d)((__v4du)__a ^ (__v4du)__b); } +/// \brief Performs a bitwise XOR of two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the +/// values between both operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_xor_ps(__m256 __a, __m256 __b) { - return (__m256)((__v8si)__a ^ (__v8si)__b); + return (__m256)((__v8su)__a ^ (__v8su)__b); } /* Horizontal arithmetic */ +/// \brief Horizontally adds the adjacent pairs of values contained in two +/// 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal sums of the values are returned in the even-indexed +/// elements of a vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal sums of the values are returned in the odd-indexed +/// elements of a vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the horizontal sums of +/// both operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_hadd_pd(__m256d __a, __m256d __b) { return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b); } +/// \brief Horizontally adds the adjacent pairs of values contained in two +/// 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal sums of the values are returned in the elements with +/// index 0, 1, 4, 5 of a vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal sums of the values are returned in the elements with +/// index 2, 3, 6, 7 of a vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the horizontal sums of +/// both operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_hadd_ps(__m256 __a, __m256 __b) { return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in two +/// 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPD instruction. 
+/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// even-indexed elements of a vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// odd-indexed elements of a vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the horizontal +/// differences of both operands. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_hsub_pd(__m256d __a, __m256d __b) { return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in two +/// 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// elements with index 0, 1, 4, 5 of a vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// elements with index 2, 3, 6, 7 of a vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the horizontal +/// differences of both operands. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_hsub_ps(__m256 __a, __m256 __b) { @@ -239,87 +757,638 @@ _mm256_hsub_ps(__m256 __a, __m256 __b) } /* Vector permutations */ +/// \brief Copies the values in a 128-bit vector of [2 x double] as specified +/// by the 128-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __c +/// A 128-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [65]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. +/// \returns A 128-bit vector of [2 x double] containing the copied values. static __inline __m128d __DEFAULT_FN_ATTRS _mm_permutevar_pd(__m128d __a, __m128i __c) { return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c); } +/// \brief Copies the values in a 256-bit vector of [4 x double] as specified +/// by the 256-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __c +/// A 256-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [65]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. 
\n +/// Bit [129]: \n +/// 0: Bits [191:128] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// Bit [193]: \n +/// 0: Bits [191:128] of the source are copied to bits [255:192] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [255:192] of the +/// returned vector. +/// \returns A 256-bit vector of [4 x double] containing the copied values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_permutevar_pd(__m256d __a, __m256i __c) { return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c); } +/// \brief Copies the values stored in a 128-bit vector of [4 x float] as +/// specified by the 128-bit integer vector operand. +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __c +/// A 128-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [33:32]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [65:64]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [97:96]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. +/// \returns A 128-bit vector of [4 x float] containing the copied values. static __inline __m128 __DEFAULT_FN_ATTRS _mm_permutevar_ps(__m128 __a, __m128i __c) { return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c); } +/// \brief Copies the values stored in a 256-bit vector of [8 x float] as +/// specified by the 256-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __c +/// A 256-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. 
\n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [33:32]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [65:64]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [97:96]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// Bits [129:128]: \n +/// 00: Bits [159:128] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// Bits [161:160]: \n +/// 00: Bits [159:128] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// Bits [193:192]: \n +/// 00: Bits [159:128] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// Bits [225:224]: \n +/// 00: Bits [159:128] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [255:224] of the +/// returned vector. +/// \returns A 256-bit vector of [8 x float] containing the copied values. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_permutevar_ps(__m256 __a, __m256i __c) { return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c); } +/// \brief Copies the values in a 128-bit vector of [2 x double] as specified +/// by the immediate integer operand. 
+/// +/// \headerfile +/// +/// \code +/// __m128d _mm_permute_pd(__m128d A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param A +/// A 128-bit vector of [2 x double]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bit [0]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. +/// \returns A 128-bit vector of [2 x double] containing the copied values. #define _mm_permute_pd(A, C) __extension__ ({ \ - __m128d __A = (A); \ - (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \ - (C) & 0x1, ((C) & 0x2) >> 1); }) + (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \ + (__v2df)_mm_undefined_pd(), \ + ((C) >> 0) & 0x1, ((C) >> 1) & 0x1); }) +/// \brief Copies the values in a 256-bit vector of [4 x double] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_permute_pd(__m256d A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param A +/// A 256-bit vector of [4 x double]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bit [0]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// Bit [2]: \n +/// 0: Bits [191:128] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// Bit [3]: \n +/// 0: Bits [191:128] of the source are copied to bits [255:192] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [255:192] of the +/// returned vector. +/// \returns A 256-bit vector of [4 x double] containing the copied values. #define _mm256_permute_pd(A, C) __extension__ ({ \ - __m256d __A = (A); \ - (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \ - (C) & 0x1, ((C) & 0x2) >> 1, \ - 2 + (((C) & 0x4) >> 2), \ - 2 + (((C) & 0x8) >> 3)); }) + (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \ + (__v4df)_mm256_undefined_pd(), \ + 0 + (((C) >> 0) & 0x1), \ + 0 + (((C) >> 1) & 0x1), \ + 2 + (((C) >> 2) & 0x1), \ + 2 + (((C) >> 3) & 0x1)); }) +/// \brief Copies the values in a 128-bit vector of [4 x float] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_permute_ps(__m128 A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param A +/// A 128-bit vector of [4 x float]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. 
\n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [7:6]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. +/// \returns A 128-bit vector of [4 x float] containing the copied values. #define _mm_permute_ps(A, C) __extension__ ({ \ - __m128 __A = (A); \ - (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \ - (C) & 0x3, ((C) & 0xc) >> 2, \ - ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); }) + (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \ + (__v4sf)_mm_undefined_ps(), \ + ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \ + ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); }) +/// \brief Copies the values in a 256-bit vector of [8 x float] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_permute_ps(__m256 A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param A +/// A 256-bit vector of [8 x float]. +/// \param C +/// An immediate integer operand specifying how the values are to be \n +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. 
\n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [7:6]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// Bits [1:0]: \n +/// 00: Bits [159:128] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [159:128] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [159:128] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// Bits [7:6]: \n +/// 00: Bits [159:128] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [255:224] of the +/// returned vector. +/// \returns A 256-bit vector of [8 x float] containing the copied values. #define _mm256_permute_ps(A, C) __extension__ ({ \ - __m256 __A = (A); \ - (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \ - (C) & 0x3, ((C) & 0xc) >> 2, \ - ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \ - 4 + (((C) & 0x03) >> 0), \ - 4 + (((C) & 0x0c) >> 2), \ - 4 + (((C) & 0x30) >> 4), \ - 4 + (((C) & 0xc0) >> 6)); }) + (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \ + (__v8sf)_mm256_undefined_ps(), \ + 0 + (((C) >> 0) & 0x3), \ + 0 + (((C) >> 2) & 0x3), \ + 0 + (((C) >> 4) & 0x3), \ + 0 + (((C) >> 6) & 0x3), \ + 4 + (((C) >> 0) & 0x3), \ + 4 + (((C) >> 2) & 0x3), \ + 4 + (((C) >> 4) & 0x3), \ + 4 + (((C) >> 6) & 0x3)); }) +/// \brief Permutes 128-bit data values stored in two 256-bit vectors of +/// [4 x double], as specified by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_permute2f128_pd(__m256d V1, __m256d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPERM2F128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. +/// \param V2 +/// A 256-bit vector of [4 x double]. +/// \param M +/// An immediate integer operand specifying how the values are to be +/// permuted. 
\n +/// Bits [1:0]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// Bits [5:4]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. #define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \ - __m256d __V1 = (V1); \ - __m256d __V2 = (V2); \ - (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)__V1, (__v4df)__V2, (M)); }) + (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \ + (__v4df)(__m256d)(V2), (M)); }) +/// \brief Permutes 128-bit data values stored in two 256-bit vectors of +/// [8 x float], as specified by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_permute2f128_ps(__m256 V1, __m256 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPERM2F128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x float]. +/// \param V2 +/// A 256-bit vector of [8 x float]. +/// \param M +/// An immediate integer operand specifying how the values are to be +/// permuted. \n +/// Bits [1:0]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// Bits [5:4]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the +/// destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. #define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \ - __m256 __V1 = (V1); \ - __m256 __V2 = (V2); \ - (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)__V1, (__v8sf)__V2, (M)); }) + (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), (M)); }) +/// \brief Permutes 128-bit data values stored in two 256-bit integer vectors, +/// as specified by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_permute2f128_si256(__m256i V1, __m256i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPERM2F128 instruction. +/// +/// \param V1 +/// A 256-bit integer vector. +/// \param V2 +/// A 256-bit integer vector. +/// \param M +/// An immediate integer operand specifying how the values are to be copied. +/// Bits [1:0]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the +/// destination. 
\n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// Bits [5:4]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the +/// destination. +/// \returns A 256-bit integer vector containing the copied values. #define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \ - __m256i __V1 = (V1); \ - __m256i __V2 = (V2); \ - (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)__V1, (__v8si)__V2, (M)); }) + (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \ + (__v8si)(__m256i)(V2), (M)); }) /* Vector Blend */ +/// \brief Merges 64-bit double-precision data values stored in either of the +/// two 256-bit vectors of [4 x double], as specified by the immediate +/// integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPD instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. +/// \param V2 +/// A 256-bit vector of [4 x double]. +/// \param M +/// An immediate integer operand, with mask bits [3:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 64-bit +/// element in operand \a V1 is copied to the same position in the +/// destination. When a mask bit is 1, the corresponding 64-bit element in +/// operand \a V2 is copied to the same position in the destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. #define _mm256_blend_pd(V1, V2, M) __extension__ ({ \ - __m256d __V1 = (V1); \ - __m256d __V2 = (V2); \ - (__m256d)__builtin_shufflevector((__v4df)__V1, (__v4df)__V2, \ + (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \ + (__v4df)(__m256d)(V2), \ (((M) & 0x01) ? 4 : 0), \ (((M) & 0x02) ? 5 : 1), \ (((M) & 0x04) ? 6 : 2), \ (((M) & 0x08) ? 7 : 3)); }) +/// \brief Merges 32-bit single-precision data values stored in either of the +/// two 256-bit vectors of [8 x float], as specified by the immediate +/// integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_blend_ps(__m256 V1, __m256 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPS instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x float]. +/// \param V2 +/// A 256-bit vector of [8 x float]. +/// \param M +/// An immediate integer operand, with mask bits [7:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 32-bit +/// element in operand \a V1 is copied to the same position in the +/// destination. When a mask bit is 1, the corresponding 32-bit element in +/// operand \a V2 is copied to the same position in the destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. 
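To illustrate how the permute and blend macros above compose, a small sketch that swaps 128-bit lanes, blends by immediate mask, and reverses elements within each lane (assuming -mavx; the immediates and values are illustrative):

#include <immintrin.h>

int main(void)
{
    __m256d a = _mm256_set_pd(3.0, 2.0, 1.0, 0.0);   /* lanes: {0, 1 | 2, 3} */
    __m256d b = _mm256_set_pd(7.0, 6.0, 5.0, 4.0);   /* lanes: {4, 5 | 6, 7} */

    /* Swap the two 128-bit halves of a: immediate 0x01 routes a's upper lane
       to bits [127:0] and a's lower lane to bits [255:128]. */
    __m256d swapped = _mm256_permute2f128_pd(a, a, 0x01);   /* {2, 3, 0, 1} */

    /* Per-element merge: mask 0x0A (0b1010) takes elements 1 and 3 from b,
       elements 0 and 2 from a. */
    __m256d mixed = _mm256_blend_pd(a, b, 0x0A);             /* {0, 5, 2, 7} */

    /* Reverse the four floats within each 128-bit lane. */
    __m256 f = _mm256_set_ps(7.f, 6.f, 5.f, 4.f, 3.f, 2.f, 1.f, 0.f);
    __m256 rev = _mm256_permute_ps(f, _MM_SHUFFLE(0, 1, 2, 3));
    /* rev = {3, 2, 1, 0, 7, 6, 5, 4} */

    (void)swapped; (void)mixed; (void)rev;
    return 0;
}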
#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \ - __m256 __V1 = (V1); \ - __m256 __V2 = (V2); \ - (__m256)__builtin_shufflevector((__v8sf)__V1, (__v8sf)__V2, \ + (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), \ (((M) & 0x01) ? 8 : 0), \ (((M) & 0x02) ? 9 : 1), \ (((M) & 0x04) ? 10 : 2), \ @@ -329,6 +1398,27 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c) (((M) & 0x40) ? 14 : 6), \ (((M) & 0x80) ? 15 : 7)); }) +/// \brief Merges 64-bit double-precision data values stored in either of the +/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \param __c +/// A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying +/// how the values are to be copied. The position of the mask bit corresponds +/// to the most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 64-bit element in operand \a __a is copied to the same +/// position in the destination. When a mask bit is 1, the corresponding +/// 64-bit element in operand \a __b is copied to the same position in the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c) { @@ -336,6 +1426,27 @@ _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c) (__v4df)__a, (__v4df)__b, (__v4df)__c); } +/// \brief Merges 32-bit single-precision data values stored in either of the +/// two 256-bit vectors of [8 x float], as specified by the 256-bit vector +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \param __c +/// A 256-bit vector operand, with mask bits 255, 223, 191, 159, 127, 95, 63, +/// and 31 specifying how the values are to be copied. The position of the +/// mask bit corresponds to the most significant bit of a copied value. When +/// a mask bit is 0, the corresponding 32-bit element in operand \a __a is +/// copied to the same position in the destination. When a mask bit is 1, the +/// corresponding 32-bit element in operand \a __b is copied to the same +/// position in the destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) { @@ -344,29 +1455,155 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) } /* Vector Dot Product */ +/// \brief Computes two dot products in parallel, using the lower and upper +/// halves of two [8 x float] vectors as input to the two computations, and +/// returning the two dot products in the lower and upper halves of the +/// [8 x float] result. The immediate integer operand controls which input +/// elements will contribute to the dot product, and where the final results +/// are returned. In general, for each dot product, the four corresponding +/// elements of the input vectors are multiplied; the first two and second +/// two products are summed, then the two sums are added to form the final +/// result. 
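For the variable blends (_mm256_blendv_pd / _mm256_blendv_ps) only the sign bit of each mask element is consulted, as the documentation above states. A small sketch under the same assumptions (helper name is illustrative):

#include <immintrin.h>

/* Variable blend: only the sign bit (bit 63) of each mask element matters.
   Mask elements 1 and 3 are -0.0 (sign bit set), so those positions are
   taken from b; positions 0 and 2 are taken from a.
   Note _mm256_set_pd lists elements high-to-low: (e3, e2, e1, e0). */
static __m256d blendv_demo(__m256d a, __m256d b)
{
    const __m256d mask = _mm256_set_pd(-0.0, 0.0, -0.0, 0.0);
    return _mm256_blendv_pd(a, b, mask);
}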
+/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_dp_ps(__m256 V1, __m256 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VDPPS instruction. +/// +/// \param V1 +/// A vector of [8 x float] values, treated as two [4 x float] vectors. +/// \param V2 +/// A vector of [8 x float] values, treated as two [4 x float] vectors. +/// \param M +/// An immediate integer argument. Bits [7:4] determine which elements of +/// the input vectors are used, with bit [4] corresponding to the lowest +/// element and bit [7] corresponding to the highest element of each [4 x +/// float] subvector. If a bit is set, the corresponding elements from the +/// two input vectors are used as an input for dot product; otherwise that +/// input is treated as zero. Bits [3:0] determine which elements of the +/// result will receive a copy of the final dot product, with bit [0] +/// corresponding to the lowest element and bit [3] corresponding to the +/// highest element of each [4 x float] subvector. If a bit is set, the dot +/// product is returned in the corresponding element; otherwise that element +/// is set to zero. The bitmask is applied in the same way to each of the +/// two parallel dot product computations. +/// \returns A 256-bit vector of [8 x float] containing the two dot products. #define _mm256_dp_ps(V1, V2, M) __extension__ ({ \ - __m256 __V1 = (V1); \ - __m256 __V2 = (V2); \ - (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); }) + (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), (M)); }) /* Vector shuffle */ +/// \brief Selects 8 float values from the 256-bit operands of [8 x float], as +/// specified by the immediate value operand. The four selected elements in +/// each operand are copied to the destination according to the bits +/// specified in the immediate operand. The selected elements from the first +/// 256-bit operand are copied to bits [63:0] and bits [191:128] of the +/// destination, and the selected elements from the second 256-bit operand +/// are copied to bits [127:64] and bits [255:192] of the destination. For +/// example, if bits [7:0] of the immediate operand contain a value of 0xFF, +/// the 256-bit destination vector would contain the following values: b[7], +/// b[7], a[7], a[7], b[3], b[3], a[3], a[3]. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_shuffle_ps(__m256 a, __m256 b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float]. The four selected elements in this +/// operand are copied to bits [63:0] and bits [191:128] in the destination, +/// according to the bits specified in the immediate operand. +/// \param b +/// A 256-bit vector of [8 x float]. The four selected elements in this +/// operand are copied to bits [127:64] and bits [255:192] in the +/// destination, according to the bits specified in the immediate operand. +/// \param mask +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \a a and \a b \n. +/// Bits [3:0] specify the values copied from operand \a a. \n +/// Bits [7:4] specify the values copied from operand \a b. \n +/// The destinations within the 256-bit destination are assigned values as +/// follows, according to the bit value assignments described below: \n +/// Bits [1:0] are used to assign values to bits [31:0] and [159:128] in the +/// destination. 
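A sketch of the dual dot product described above (not part of the header; dual_dot is an illustrative name, and it also leans on _mm256_extractf128_ps plus the scalar-extract helpers defined elsewhere in this header):

#include <immintrin.h>

/* Two 4-element dot products at once. Immediate 0xF1: bits [7:4] = 0xF use
   all four inputs of each 128-bit half, bits [3:0] = 0x1 place each sum in
   element 0 of its half (the remaining elements are zeroed). */
static void dual_dot(__m256 a, __m256 b, float *lo_dot, float *hi_dot)
{
    __m256 dp = _mm256_dp_ps(a, b, 0xF1);
    *lo_dot = _mm256_cvtss_f32(dp);                        /* element 0 */
    *hi_dot = _mm_cvtss_f32(_mm256_extractf128_ps(dp, 1)); /* element 4 */
}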
\n +/// Bits [3:2] are used to assign values to bits [63:32] and [191:160] in the +/// destination. \n +/// Bits [5:4] are used to assign values to bits [95:64] and [223:192] in the +/// destination. \n +/// Bits [7:6] are used to assign values to bits [127:96] and [255:224] in +/// the destination. \n +/// Bit value assignments: \n +/// 00: Bits [31:0] and [159:128] are copied from the selected operand. \n +/// 01: Bits [63:32] and [191:160] are copied from the selected operand. \n +/// 10: Bits [95:64] and [223:192] are copied from the selected operand. \n +/// 11: Bits [127:96] and [255:224] are copied from the selected operand. +/// \returns A 256-bit vector of [8 x float] containing the shuffled values. #define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \ - __m256 __a = (a); \ - __m256 __b = (b); \ - (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \ - (mask) & 0x3, ((mask) & 0xc) >> 2, \ - (((mask) & 0x30) >> 4) + 8, (((mask) & 0xc0) >> 6) + 8, \ - ((mask) & 0x3) + 4, (((mask) & 0xc) >> 2) + 4, \ - (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12); }) + (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), \ + 0 + (((mask) >> 0) & 0x3), \ + 0 + (((mask) >> 2) & 0x3), \ + 8 + (((mask) >> 4) & 0x3), \ + 8 + (((mask) >> 6) & 0x3), \ + 4 + (((mask) >> 0) & 0x3), \ + 4 + (((mask) >> 2) & 0x3), \ + 12 + (((mask) >> 4) & 0x3), \ + 12 + (((mask) >> 6) & 0x3)); }) +/// \brief Selects four double-precision values from the 256-bit operands of +/// [4 x double], as specified by the immediate value operand. The selected +/// elements from the first 256-bit operand are copied to bits [63:0] and +/// bits [191:128] in the destination, and the selected elements from the +/// second 256-bit operand are copied to bits [127:64] and bits [255:192] in +/// the destination. For example, if bits [3:0] of the immediate operand +/// contain a value of 0xF, the 256-bit destination vector would contain the +/// following values: b[3], a[3], b[1], a[1]. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_shuffle_pd(__m256d a, __m256d b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double]. +/// \param b +/// A 256-bit vector of [4 x double]. +/// \param mask +/// An immediate value containing 8-bit values specifying which elements to +/// copy from \a a and \a b: \n +/// Bit [0]=0: Bits [63:0] are copied from \a a to bits [63:0] of the +/// destination. \n +/// Bit [0]=1: Bits [127:64] are copied from \a a to bits [63:0] of the +/// destination. \n +/// Bit [1]=0: Bits [63:0] are copied from \a b to bits [127:64] of the +/// destination. \n +/// Bit [1]=1: Bits [127:64] are copied from \a b to bits [127:64] of the +/// destination. \n +/// Bit [2]=0: Bits [191:128] are copied from \a a to bits [191:128] of the +/// destination. \n +/// Bit [2]=1: Bits [255:192] are copied from \a a to bits [191:128] of the +/// destination. \n +/// Bit [3]=0: Bits [191:128] are copied from \a b to bits [255:192] of the +/// destination. \n +/// Bit [3]=1: Bits [255:192] are copied from \a b to bits [255:192] of the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the shuffled values. 
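One concrete instance of the shuffle encoding described above (illustrative only, AVX assumed). The same 8-bit selector is applied independently to the low and high 128-bit halves:

#include <immintrin.h>

/* Immediate 0x44 = binary 01 00 01 00: per 128-bit half, take elements
   0 and 1 of a, then elements 0 and 1 of b, giving
   {a0, a1, b0, b1, a4, a5, b4, b5}. */
static __m256 pair_low(__m256 a, __m256 b)
{
    return _mm256_shuffle_ps(a, b, 0x44);
}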
#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \ - __m256d __a = (a); \ - __m256d __b = (b); \ - (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \ - (mask) & 0x1, \ - (((mask) & 0x2) >> 1) + 4, \ - (((mask) & 0x4) >> 2) + 2, \ - (((mask) & 0x8) >> 3) + 6); }) + (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), \ + 0 + (((mask) >> 0) & 0x1), \ + 4 + (((mask) >> 1) & 0x1), \ + 2 + (((mask) >> 2) & 0x1), \ + 6 + (((mask) >> 3) & 0x1)); }) /* Compare */ #define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */ @@ -402,36 +1639,241 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) #define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */ #define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */ +/// \brief Compares each of the corresponding double-precision values of two +/// 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. Returns a [2 x double] vector consisting of +/// two doubles corresponding to the two comparison results: zero if the +/// comparison is false, and all 1's if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 00h, 08h, 10h, 18h: Equal \n +/// 01h, 09h, 11h, 19h: Less than \n +/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal +/// (swapped operands) \n +/// 03h, 0Bh, 13h, 1Bh: Unordered \n +/// 04h, 0Ch, 14h, 1Ch: Not equal \n +/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than +/// (swapped operands) \n +/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal +/// (swapped operands) \n +/// 07h, 0Fh, 17h, 1Fh: Ordered +/// \returns A 128-bit vector of [2 x double] containing the comparison results. #define _mm_cmp_pd(a, b, c) __extension__ ({ \ - __m128d __a = (a); \ - __m128d __b = (b); \ - (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); }) + (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (c)); }) +/// \brief Compares each of the corresponding values of two 128-bit vectors of +/// [4 x float], using the operation specified by the immediate integer +/// operand. Returns a [4 x float] vector consisting of four floats +/// corresponding to the four comparison results: zero if the comparison is +/// false, and all 1's if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 00h, 08h, 10h, 18h: Equal \n +/// 01h, 09h, 11h, 19h: Less than \n +/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal +/// (swapped operands) \n +/// 03h, 0Bh, 13h, 1Bh: Unordered \n +/// 04h, 0Ch, 14h, 1Ch: Not equal \n +/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than +/// (swapped operands) \n +/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal +/// (swapped operands) \n +/// 07h, 0Fh, 17h, 1Fh: Ordered +/// \returns A 128-bit vector of [4 x float] containing the comparison results. #define _mm_cmp_ps(a, b, c) __extension__ ({ \ - __m128 __a = (a); \ - __m128 __b = (b); \ - (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); }) + (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), (c)); }) +/// \brief Compares each of the corresponding double-precision values of two +/// 256-bit vectors of [4 x double], using the operation specified by the +/// immediate integer operand. Returns a [4 x double] vector consisting of +/// four doubles corresponding to the four comparison results: zero if the +/// comparison is false, and all 1's if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double]. +/// \param b +/// A 256-bit vector of [4 x double]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 00h, 08h, 10h, 18h: Equal \n +/// 01h, 09h, 11h, 19h: Less than \n +/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal +/// (swapped operands) \n +/// 03h, 0Bh, 13h, 1Bh: Unordered \n +/// 04h, 0Ch, 14h, 1Ch: Not equal \n +/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than +/// (swapped operands) \n +/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal +/// (swapped operands) \n +/// 07h, 0Fh, 17h, 1Fh: Ordered +/// \returns A 256-bit vector of [4 x double] containing the comparison results. #define _mm256_cmp_pd(a, b, c) __extension__ ({ \ - __m256d __a = (a); \ - __m256d __b = (b); \ - (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); }) + (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (c)); }) +/// \brief Compares each of the corresponding values of two 256-bit vectors of +/// [8 x float], using the operation specified by the immediate integer +/// operand. Returns a [8 x float] vector consisting of eight floats +/// corresponding to the eight comparison results: zero if the comparison is +/// false, and all 1's if the comparison is true. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float]. +/// \param b +/// A 256-bit vector of [8 x float]. 
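The compare intrinsics produce all-ones/all-zeros element masks, which combine naturally with the variable blends documented earlier. A NaN-naive sketch of that flow (the header provides _mm256_min_pd for real use; this only illustrates the predicate constants and mask plumbing):

#include <immintrin.h>

/* Elementwise minimum built from a compare mask: where b < a (ordered,
   non-signaling) the mask element is all 1s and blendv takes b. */
static __m256d min_via_cmp(__m256d a, __m256d b)
{
    __m256d lt = _mm256_cmp_pd(b, a, _CMP_LT_OQ);
    return _mm256_blendv_pd(a, b, lt);
}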
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 00h, 08h, 10h, 18h: Equal \n +/// 01h, 09h, 11h, 19h: Less than \n +/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal +/// (swapped operands) \n +/// 03h, 0Bh, 13h, 1Bh: Unordered \n +/// 04h, 0Ch, 14h, 1Ch: Not equal \n +/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than +/// (swapped operands) \n +/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal +/// (swapped operands) \n +/// 07h, 0Fh, 17h, 1Fh: Ordered +/// \returns A 256-bit vector of [8 x float] containing the comparison results. #define _mm256_cmp_ps(a, b, c) __extension__ ({ \ - __m256 __a = (a); \ - __m256 __b = (b); \ - (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); }) + (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (c)); }) +/// \brief Compares each of the corresponding scalar double-precision values of +/// two 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. If the result is true, all 64 bits of the +/// destination vector are set; otherwise they are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPSD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 00h, 08h, 10h, 18h: Equal \n +/// 01h, 09h, 11h, 19h: Less than \n +/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal +/// (swapped operands) \n +/// 03h, 0Bh, 13h, 1Bh: Unordered \n +/// 04h, 0Ch, 14h, 1Ch: Not equal \n +/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than +/// (swapped operands) \n +/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal +/// (swapped operands) \n +/// 07h, 0Fh, 17h, 1Fh: Ordered +/// \returns A 128-bit vector of [2 x double] containing the comparison results. #define _mm_cmp_sd(a, b, c) __extension__ ({ \ - __m128d __a = (a); \ - __m128d __b = (b); \ - (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); }) + (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (c)); }) +/// \brief Compares each of the corresponding scalar values of two 128-bit +/// vectors of [4 x float], using the operation specified by the immediate +/// integer operand. If the result is true, all 32 bits of the destination +/// vector are set; otherwise they are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPSS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 00h, 08h, 10h, 18h: Equal \n +/// 01h, 09h, 11h, 19h: Less than \n +/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal +/// (swapped operands) \n +/// 03h, 0Bh, 13h, 1Bh: Unordered \n +/// 04h, 0Ch, 14h, 1Ch: Not equal \n +/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than +/// (swapped operands) \n +/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal +/// (swapped operands) \n +/// 07h, 0Fh, 17h, 1Fh: Ordered +/// \returns A 128-bit vector of [4 x float] containing the comparison results. #define _mm_cmp_ss(a, b, c) __extension__ ({ \ - __m128 __a = (a); \ - __m128 __b = (b); \ - (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); }) + (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), (c)); }) +/// \brief Takes a [8 x i32] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __imm +/// An immediate integer operand with bits [2:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 32 bits of extended +/// packed data. static __inline int __DEFAULT_FN_ATTRS _mm256_extract_epi32(__m256i __a, const int __imm) { @@ -439,21 +1881,66 @@ _mm256_extract_epi32(__m256i __a, const int __imm) return __b[__imm & 7]; } +/// \brief Takes a [16 x i16] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A 256-bit integer vector of [16 x i16]. +/// \param __imm +/// An immediate integer operand with bits [3:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 16 bits of zero extended +/// packed data. static __inline int __DEFAULT_FN_ATTRS _mm256_extract_epi16(__m256i __a, const int __imm) { __v16hi __b = (__v16hi)__a; - return __b[__imm & 15]; + return (unsigned short)__b[__imm & 15]; } +/// \brief Takes a [32 x i8] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A 256-bit integer vector of [32 x i8]. +/// \param __imm +/// An immediate integer operand with bits [4:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 8 bits of zero extended +/// packed data. static __inline int __DEFAULT_FN_ATTRS _mm256_extract_epi8(__m256i __a, const int __imm) { __v32qi __b = (__v32qi)__a; - return __b[__imm & 31]; + return (unsigned char)__b[__imm & 31]; } #ifdef __x86_64__ +/// \brief Takes a [4 x i64] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A 256-bit integer vector of [4 x i64]. +/// \param __imm +/// An immediate integer operand with bits [1:0] determining which vector +/// element is extracted and returned. 
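A one-line usage sketch for the element-extract helpers above (illustrative; the index must be a compile-time constant):

#include <immintrin.h>

/* Read the 32-bit element at index 3 of a [8 x i32] vector.
   As the implementation shows, the index is taken modulo 8. */
static int element3(__m256i v)
{
    return _mm256_extract_epi32(v, 3);
}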
+/// \returns A 64-bit integer containing the extracted 64 bits of extended +/// packed data. static __inline long long __DEFAULT_FN_ATTRS _mm256_extract_epi64(__m256i __a, const int __imm) { @@ -462,6 +1949,24 @@ _mm256_extract_epi64(__m256i __a, const int __imm) } #endif +/// \brief Takes a [8 x i32] vector and replaces the vector element value +/// indexed by the immediate constant operand by a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A vector of [8 x i32] to be used by the insert operation. +/// \param __b +/// An integer value. The replacement value for the insert operation. +/// \param __imm +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a __a, after replacing its element indexed by +/// \a __imm with \a __b. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_insert_epi32(__m256i __a, int __b, int const __imm) { @@ -470,6 +1975,25 @@ _mm256_insert_epi32(__m256i __a, int __b, int const __imm) return (__m256i)__c; } + +/// \brief Takes a [16 x i16] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A vector of [16 x i16] to be used by the insert operation. +/// \param __b +/// An i16 integer value. The replacement value for the insert operation. +/// \param __imm +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a __a, after replacing its element indexed by +/// \a __imm with \a __b. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_insert_epi16(__m256i __a, int __b, int const __imm) { @@ -478,6 +2002,24 @@ _mm256_insert_epi16(__m256i __a, int __b, int const __imm) return (__m256i)__c; } +/// \brief Takes a [32 x i8] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A vector of [32 x i8] to be used by the insert operation. +/// \param __b +/// An i8 integer value. The replacement value for the insert operation. +/// \param __imm +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a __a, after replacing its element indexed by +/// \a __imm with \a __b. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_insert_epi8(__m256i __a, int __b, int const __imm) { @@ -487,6 +2029,24 @@ _mm256_insert_epi8(__m256i __a, int __b, int const __imm) } #ifdef __x86_64__ +/// \brief Takes a [4 x i64] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param __a +/// A vector of [4 x i64] to be used by the insert operation. +/// \param __b +/// A 64-bit integer value. The replacement value for the insert operation. +/// \param __imm +/// An immediate integer specifying the index of the vector element to be +/// replaced. 
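The insert helpers are the inverse operation: they return a modified copy rather than mutating the input. A short sketch (helper name illustrative):

#include <immintrin.h>

/* Return a copy of v with its 16-bit element at index 5 replaced by x.
   The source vector itself is left unchanged. */
static __m256i set_element5(__m256i v, short x)
{
    return _mm256_insert_epi16(v, x, 5);
}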
+/// \returns A copy of vector \a __a, after replacing its element indexed by +/// \a __imm with \a __b. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_insert_epi64(__m256i __a, long long __b, int const __imm) { @@ -497,183 +2057,721 @@ _mm256_insert_epi64(__m256i __a, long long __b, int const __imm) #endif /* Conversion */ +/// \brief Converts a vector of [4 x i32] into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PD instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. +/// \returns A 256-bit vector of [4 x double] containing the converted values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtepi32_pd(__m128i __a) { - return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a); + return (__m256d)__builtin_convertvector((__v4si)__a, __v4df); } +/// \brief Converts a vector of [8 x i32] into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PS instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit vector of [8 x float] containing the converted values. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_cvtepi32_ps(__m256i __a) { return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a); } +/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2PS instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit vector of [4 x float] containing the converted values. static __inline __m128 __DEFAULT_FN_ATTRS _mm256_cvtpd_ps(__m256d __a) { return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a); } +/// \brief Converts a vector of [8 x float] into a vector of [8 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit integer vector containing the converted values. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvtps_epi32(__m256 __a) { return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a); } +/// \brief Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 +/// x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2PD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 256-bit vector of [4 x double] containing the converted values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtps_pd(__m128 __a) { - return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a); + return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df); } +/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 +/// x i32], truncating the result by rounding towards zero when it is +/// inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPD2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit integer vector containing the converted values. static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvttpd_epi32(__m256d __a) { return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a); } +/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 +/// x i32]. When a conversion is inexact, the value returned is rounded +/// according to the rounding control bits in the MXCSR register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2DQ instruction. 
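A sketch chaining the widening and truncating conversions documented in this block (not from the patch; scale_and_truncate is an illustrative name):

#include <immintrin.h>

/* Widen four 32-bit ints to doubles, scale them, then convert back with
   truncation (round toward zero), i.e. the VCVTTPD2DQ behaviour. */
static __m128i scale_and_truncate(__m128i ints)
{
    __m256d d = _mm256_cvtepi32_pd(ints);        /* [4 x i32] -> [4 x double] */
    d = _mm256_mul_pd(d, _mm256_set1_pd(0.5));   /* halve each value          */
    return _mm256_cvttpd_epi32(d);               /* truncate back to i32      */
}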
+/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit integer vector containing the converted values. static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvtpd_epi32(__m256d __a) { return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a); } +/// \brief Converts a vector of [8 x float] into a vector of [8 x i32], +/// truncating the result by rounding towards zero when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPS2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit integer vector containing the converted values. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvttps_epi32(__m256 __a) { return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a); } +static __inline double __DEFAULT_FN_ATTRS +_mm256_cvtsd_f64(__m256d __a) +{ + return __a[0]; +} + +static __inline int __DEFAULT_FN_ATTRS +_mm256_cvtsi256_si32(__m256i __a) +{ + __v8si __b = (__v8si)__a; + return __b[0]; +} + +static __inline float __DEFAULT_FN_ATTRS +_mm256_cvtss_f32(__m256 __a) +{ + return __a[0]; +} + /* Vector replicate */ +/// \brief Moves and duplicates high-order (odd-indexed) values from a 256-bit +/// vector of [8 x float] to float values in a 256-bit vector of +/// [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSHDUP instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [255:224] of \a __a are written to bits [255:224] and [223:192] of +/// the return value. \n +/// Bits [191:160] of \a __a are written to bits [191:160] and [159:128] of +/// the return value. \n +/// Bits [127:96] of \a __a are written to bits [127:96] and [95:64] of the +/// return value. \n +/// Bits [63:32] of \a __a are written to bits [63:32] and [31:0] of the +/// return value. +/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated +/// values. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_movehdup_ps(__m256 __a) { - return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7); + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7); } +/// \brief Moves and duplicates low-order (even-indexed) values from a 256-bit +/// vector of [8 x float] to float values in a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSLDUP instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [223:192] of \a __a are written to bits [255:224] and [223:192] of +/// the return value. \n +/// Bits [159:128] of \a __a are written to bits [191:160] and [159:128] of +/// the return value. \n +/// Bits [95:64] of \a __a are written to bits [127:96] and [95:64] of the +/// return value. \n +/// Bits [31:0] of \a __a are written to bits [63:32] and [31:0] of the +/// return value. +/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated +/// values. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_moveldup_ps(__m256 __a) { - return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6); + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6); } +/// \brief Moves and duplicates double-precision floating point values from a +/// 256-bit vector of [4 x double] to double-precision values in a 256-bit +/// vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. 
\n +/// Bits [63:0] of \a __a are written to bits [127:64] and [63:0] of the +/// return value. \n +/// Bits [191:128] of \a __a are written to bits [255:192] and [191:128] of +/// the return value. +/// \returns A 256-bit vector of [4 x double] containing the moved and +/// duplicated values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_movedup_pd(__m256d __a) { - return __builtin_shufflevector(__a, __a, 0, 0, 2, 2); + return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2); } /* Unpack and Interleave */ +/// \brief Unpacks the odd-indexed vector elements from two 256-bit vectors of +/// [4 x double] and interleaves them into a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [127:64] are written to bits [63:0] of the return value. \n +/// Bits [255:192] are written to bits [191:128] of the return value. \n +/// \param __b +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the return value. \n +/// Bits [255:192] are written to bits [255:192] of the return value. \n +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpackhi_pd(__m256d __a, __m256d __b) { - return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2); + return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2); } +/// \brief Unpacks the even-indexed vector elements from two 256-bit vectors of +/// [4 x double] and interleaves them into a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the return value. \n +/// Bits [191:128] are written to bits [191:128] of the return value. +/// \param __b +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [63:0] are written to bits [127:64] of the return value. \n +/// Bits [191:128] are written to bits [255:192] of the return value. \n +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpacklo_pd(__m256d __a, __m256d __b) { - return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2); + return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2); } +/// \brief Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the +/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [95:64] are written to bits [31:0] of the return value. \n +/// Bits [127:96] are written to bits [95:64] of the return value. \n +/// Bits [223:192] are written to bits [159:128] of the return value. \n +/// Bits [255:224] are written to bits [223:192] of the return value. +/// \param __b +/// A 256-bit vector of [8 x float]. \n +/// Bits [95:64] are written to bits [63:32] of the return value. \n +/// Bits [127:96] are written to bits [127:96] of the return value. \n +/// Bits [223:192] are written to bits [191:160] of the return value. \n +/// Bits [255:224] are written to bits [255:224] of the return value. 
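A small sketch of the unpack pair (illustrative), which also highlights the per-half behaviour spelled out in the bit ranges above:

#include <immintrin.h>

/* Interleave two [4 x double] vectors. The unpacks work per 128-bit half:
   lo = {a0, b0, a2, b2} and hi = {a1, b1, a3, b3}, not a full 256-bit zip. */
static void interleave_pd(__m256d a, __m256d b, __m256d *lo, __m256d *hi)
{
    *lo = _mm256_unpacklo_pd(a, b);
    *hi = _mm256_unpackhi_pd(a, b);
}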
+/// \returns A 256-bit vector of [8 x float] containing the interleaved values. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpackhi_ps(__m256 __a, __m256 __b) { - return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1); + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1); } +/// \brief Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the +/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [31:0] are written to bits [31:0] of the return value. \n +/// Bits [63:32] are written to bits [95:64] of the return value. \n +/// Bits [159:128] are written to bits [159:128] of the return value. \n +/// Bits [191:160] are written to bits [223:192] of the return value. +/// \param __b +/// A 256-bit vector of [8 x float]. \n +/// Bits [31:0] are written to bits [63:32] of the return value. \n +/// Bits [63:32] are written to bits [127:96] of the return value. \n +/// Bits [159:128] are written to bits [191:160] of the return value. \n +/// Bits [191:160] are written to bits [255:224] of the return value. +/// \returns A 256-bit vector of [8 x float] containing the interleaved values. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpacklo_ps(__m256 __a, __m256 __b) { - return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1); + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1); } /* Bit Test */ +/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns the ZF flag in the EFLAGS register. static __inline int __DEFAULT_FN_ATTRS _mm_testz_pd(__m128d __a, __m128d __b) { return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b); } +/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. 
\n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns the CF flag in the EFLAGS register. static __inline int __DEFAULT_FN_ATTRS _mm_testc_pd(__m128d __a, __m128d __b) { return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b); } +/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. static __inline int __DEFAULT_FN_ATTRS _mm_testnzc_pd(__m128d __a, __m128d __b) { return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b); } +/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns the ZF flag. static __inline int __DEFAULT_FN_ATTRS _mm_testz_ps(__m128 __a, __m128 __b) { return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b); } +/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. 
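A common idiom built on the sign-bit tests above: passing the same comparison mask as both operands makes the ZF test answer "did any element match?". Sketch only, under the usual AVX assumption:

#include <immintrin.h>

/* Returns 1 if any element of a is less than the corresponding element of b.
   The cmp mask sets the sign bit of each matching element, so
   _mm_testz_ps(mask, mask) == 1 means "no element matched". */
static int any_less_than(__m128 a, __m128 b)
{
    __m128 mask = _mm_cmp_ps(a, b, _CMP_LT_OQ);
    return !_mm_testz_ps(mask, mask);
}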
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns the CF flag. static __inline int __DEFAULT_FN_ATTRS _mm_testc_ps(__m128 __a, __m128 __b) { return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b); } +/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. static __inline int __DEFAULT_FN_ATTRS _mm_testnzc_ps(__m128 __a, __m128 __b) { return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b); } +/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \returns the ZF flag. static __inline int __DEFAULT_FN_ATTRS _mm256_testz_pd(__m256d __a, __m256d __b) { return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b); } +/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. 
+/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \returns the CF flag. static __inline int __DEFAULT_FN_ATTRS _mm256_testc_pd(__m256d __a, __m256d __b) { return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b); } +/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_pd(__m256d __a, __m256d __b) { return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b); } +/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \returns the ZF flag. static __inline int __DEFAULT_FN_ATTRS _mm256_testz_ps(__m256 __a, __m256 __b) { return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b); } +/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. 
+/// \param __b +/// A 256-bit vector of [8 x float]. +/// \returns the CF flag. static __inline int __DEFAULT_FN_ATTRS _mm256_testc_ps(__m256 __a, __m256 __b) { return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b); } +/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_ps(__m256 __a, __m256 __b) { return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b); } +/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors and update the EFLAGS register as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns the ZF flag. static __inline int __DEFAULT_FN_ATTRS _mm256_testz_si256(__m256i __a, __m256i __b) { return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b); } +/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors and update the EFLAGS register as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns the CF flag. static __inline int __DEFAULT_FN_ATTRS _mm256_testc_si256(__m256i __a, __m256i __b) { return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b); } +/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors and update the EFLAGS register as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. 
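The integer VPTEST variants support the classic all-zero and all-ones checks; a sketch (illustrative helpers):

#include <immintrin.h>

/* ZF-based test: 1 iff every bit of v is 0. */
static int is_all_zero(__m256i v)
{
    return _mm256_testz_si256(v, v);
}

/* CF-based test: CF = 1 iff there is no bit that is 0 in v but 1 in the
   second operand, so against all-ones this means every bit of v is 1. */
static int is_all_ones(__m256i v)
{
    __m256i ones = _mm256_set1_epi32(-1);
    return _mm256_testc_si256(v, ones);
}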
Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. static __inline int __DEFAULT_FN_ATTRS _mm256_testnzc_si256(__m256i __a, __m256i __b) { @@ -681,12 +2779,36 @@ _mm256_testnzc_si256(__m256i __a, __m256i __b) } /* Vector extract sign mask */ +/// \brief Extracts the sign bits of double-precision floating point elements +/// in a 256-bit vector of [4 x double] and writes them to the lower order +/// bits of the return value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the double-precision +/// floating point values with sign bits to be extracted. +/// \returns The sign bits from the operand, written to bits [3:0]. static __inline int __DEFAULT_FN_ATTRS _mm256_movemask_pd(__m256d __a) { return __builtin_ia32_movmskpd256((__v4df)__a); } +/// \brief Extracts the sign bits of double-precision floating point elements +/// in a 256-bit vector of [8 x float] and writes them to the lower order +/// bits of the return value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the double-precision floating +/// point values with sign bits to be extracted. +/// \returns The sign bits from the operand, written to bits [7:0]. static __inline int __DEFAULT_FN_ATTRS _mm256_movemask_ps(__m256 __a) { @@ -694,12 +2816,22 @@ _mm256_movemask_ps(__m256 __a) } /* Vector __zero */ +/// \brief Zeroes the contents of all XMM or YMM registers. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VZEROALL instruction. static __inline void __DEFAULT_FN_ATTRS _mm256_zeroall(void) { __builtin_ia32_vzeroall(); } +/// \brief Zeroes the upper 128 bits (bits 255:128) of all YMM registers. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VZEROUPPER instruction. static __inline void __DEFAULT_FN_ATTRS _mm256_zeroupper(void) { @@ -707,6 +2839,18 @@ _mm256_zeroupper(void) } /* Vector load with broadcast */ +/// \brief Loads a scalar single-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [4 x float] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSS instruction. +/// +/// \param __a +/// The single-precision floating point value to be broadcast. +/// \returns A 128-bit vector of [4 x float] whose 32-bit elements are set +/// equal to the broadcast value. static __inline __m128 __DEFAULT_FN_ATTRS _mm_broadcast_ss(float const *__a) { @@ -714,6 +2858,18 @@ _mm_broadcast_ss(float const *__a) return (__m128)(__v4sf){ __f, __f, __f, __f }; } +/// \brief Loads a scalar double-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [4 x double] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSD instruction. 
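A sketch of the sign-mask extraction just documented (not part of the header; __builtin_popcount is a GCC/Clang builtin used here for brevity):

#include <immintrin.h>

/* Count how many doubles in v are negative: VMOVMSKPD packs the four sign
   bits into bits [3:0] of an int, which can then be tested or popcounted. */
static int count_negative(__m256d v)
{
    int mask = _mm256_movemask_pd(v);   /* bit i = sign bit of element i */
    return __builtin_popcount(mask);
}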
+/// +/// \param __a +/// The double-precision floating point value to be broadcast. +/// \returns A 256-bit vector of [4 x double] whose 64-bit elements are set +/// equal to the broadcast value. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_sd(double const *__a) { @@ -721,6 +2877,18 @@ _mm256_broadcast_sd(double const *__a) return (__m256d)(__v4df){ __d, __d, __d, __d }; } +/// \brief Loads a scalar single-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [8 x float] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSS instruction. +/// +/// \param __a +/// The single-precision floating point value to be broadcast. +/// \returns A 256-bit vector of [8 x float] whose 32-bit elements are set +/// equal to the broadcast value. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ss(float const *__a) { @@ -728,31 +2896,87 @@ _mm256_broadcast_ss(float const *__a) return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f }; } +/// \brief Loads the data from a 128-bit vector of [2 x double] from the +/// specified address pointed to by \a __a and broadcasts it to 128-bit +/// elements in a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTF128 instruction. +/// +/// \param __a +/// The 128-bit vector of [2 x double] to be broadcast. +/// \returns A 256-bit vector of [4 x double] whose 128-bit elements are set +/// equal to the broadcast value. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_pd(__m128d const *__a) { - return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a); + return (__m256d)__builtin_ia32_vbroadcastf128_pd256((__v2df const *)__a); } +/// \brief Loads the data from a 128-bit vector of [4 x float] from the +/// specified address pointed to by \a __a and broadcasts it to 128-bit +/// elements in a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTF128 instruction. +/// +/// \param __a +/// The 128-bit vector of [4 x float] to be broadcast. +/// \returns A 256-bit vector of [8 x float] whose 128-bit elements are set +/// equal to the broadcast value. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ps(__m128 const *__a) { - return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a); + return (__m256)__builtin_ia32_vbroadcastf128_ps256((__v4sf const *)__a); } /* SIMD load ops */ +/// \brief Loads 4 double-precision floating point values from a 32-byte aligned +/// memory location pointed to by \a __p into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location containing +/// double-precision floating point values. +/// \returns A 256-bit vector of [4 x double] containing the moved values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_load_pd(double const *__p) { return *(__m256d *)__p; } +/// \brief Loads 8 single-precision floating point values from a 32-byte aligned +/// memory location pointed to by \a __p into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location containing float values. +/// \returns A 256-bit vector of [8 x float] containing the moved values. 
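A short sketch of the broadcast and 32-byte-aligned load/store intrinsics documented here (the helper is hypothetical; it assumes `data` is 32-byte aligned, `n` is a multiple of 4, and _mm256_mul_pd from elsewhere in this header is available):

#include <immintrin.h>

/* Multiplies n doubles in place by a scalar. The factor is splatted with
   VBROADCASTSD; the aligned VMOVAPD forms require 32-byte alignment. */
static void scale_doubles(double *data, int n, double factor)
{
    __m256d f = _mm256_broadcast_sd(&factor);
    for (int i = 0; i < n; i += 4) {
        __m256d v = _mm256_load_pd(data + i);
        _mm256_store_pd(data + i, _mm256_mul_pd(v, f));
    }
}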
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_load_ps(float const *__p) { return *(__m256 *)__p; } +/// \brief Loads 4 double-precision floating point values from an unaligned +/// memory location pointed to by \a __p into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD instruction. +/// +/// \param __p +/// A pointer to a memory location containing double-precision floating +/// point values. +/// \returns A 256-bit vector of [4 x double] containing the moved values. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_loadu_pd(double const *__p) { @@ -762,6 +2986,17 @@ _mm256_loadu_pd(double const *__p) return ((struct __loadu_pd*)__p)->__v; } +/// \brief Loads 8 single-precision floating point values from an unaligned +/// memory location pointed to by \a __p into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location containing single-precision floating +/// point values. +/// \returns A 256-bit vector of [8 x float] containing the moved values. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_loadu_ps(float const *__p) { @@ -771,12 +3006,33 @@ _mm256_loadu_ps(float const *__p) return ((struct __loadu_ps*)__p)->__v; } +/// \brief Loads 256 bits of integer data from a 32-byte aligned memory +/// location pointed to by \a __p into elements of a 256-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a 256-bit integer vector containing integer +/// values. +/// \returns A 256-bit integer vector containing the moved values. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_load_si256(__m256i const *__p) { return *__p; } +/// \brief Loads 256 bits of integer data from an unaligned memory location +/// pointed to by \a __p into a 256-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU instruction. +/// +/// \param __p +/// A pointer to a 256-bit integer vector containing integer values. +/// \returns A 256-bit integer vector containing the moved values. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_loadu_si256(__m256i const *__p) { @@ -786,6 +3042,18 @@ _mm256_loadu_si256(__m256i const *__p) return ((struct __loadu_si256*)__p)->__v; } +/// \brief Loads 256 bits of integer data from an unaligned memory location +/// pointed to by \a __p into a 256-bit integer vector. This intrinsic may +/// perform better than \c _mm256_loadu_si256 when the data crosses a cache +/// line boundary. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDDQU instruction. +/// +/// \param __p +/// A pointer to a 256-bit integer vector containing integer values. +/// \returns A 256-bit integer vector containing the moved values. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_lddqu_si256(__m256i const *__p) { @@ -793,119 +3061,476 @@ _mm256_lddqu_si256(__m256i const *__p) } /* SIMD store ops */ +/// \brief Stores double-precision floating point values from a 256-bit vector +/// of [4 x double] to a 32-byte aligned memory location pointed to by +/// \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// double-precision floaing point values. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be moved. 
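The unaligned variants above accept any address, trading a possible performance penalty for flexibility. A minimal sketch (hypothetical helper, AVX build assumed, `n` a multiple of 8):

#include <immintrin.h>

/* Copies 256 bits at a time between buffers of arbitrary alignment using the
   unaligned VMOVDQU load and store forms. */
static void copy_i32(int *dst, const int *src, int n)
{
    for (int i = 0; i < n; i += 8) {
        __m256i v = _mm256_loadu_si256((const __m256i *)(src + i));
        _mm256_storeu_si256((__m256i *)(dst + i), v);
    }
}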
static __inline void __DEFAULT_FN_ATTRS _mm256_store_pd(double *__p, __m256d __a) { *(__m256d *)__p = __a; } +/// \brief Stores single-precision floating point values from a 256-bit vector +/// of [8 x float] to a 32-byte aligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// float values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. static __inline void __DEFAULT_FN_ATTRS _mm256_store_ps(float *__p, __m256 __a) { *(__m256 *)__p = __a; } +/// \brief Stores double-precision floating point values from a 256-bit vector +/// of [4 x double] to an unaligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the double-precision +/// floating point values. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be moved. static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_pd(double *__p, __m256d __a) { - __builtin_ia32_storeupd256(__p, (__v4df)__a); + struct __storeu_pd { + __m256d __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__p)->__v = __a; } +/// \brief Stores single-precision floating point values from a 256-bit vector +/// of [8 x float] to an unaligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_ps(float *__p, __m256 __a) { - __builtin_ia32_storeups256(__p, (__v8sf)__a); + struct __storeu_ps { + __m256 __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__p)->__v = __a; } +/// \brief Stores integer values from a 256-bit integer vector to a 32-byte +/// aligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// integer values. +/// \param __a +/// A 256-bit integer vector containing the values to be moved. static __inline void __DEFAULT_FN_ATTRS _mm256_store_si256(__m256i *__p, __m256i __a) { *__p = __a; } +/// \brief Stores integer values from a 256-bit integer vector to an unaligned +/// memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the integer values. +/// \param __a +/// A 256-bit integer vector containing the values to be moved. static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_si256(__m256i *__p, __m256i __a) { - __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a); + struct __storeu_si256 { + __m256i __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si256*)__p)->__v = __a; } /* Conditional load ops */ +/// \brief Conditionally loads double-precision floating point elements from a +/// memory location pointed to by \a __p into a 128-bit vector of +/// [2 x double], depending on the mask bits associated with each data +/// element. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the double-precision +/// floating point values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each data element represents the mask bits. If a mask bit is zero, the +/// corresponding value in the memory location is not loaded and the +/// corresponding field in the return value is set to zero. +/// \returns A 128-bit vector of [2 x double] containing the loaded values. static __inline __m128d __DEFAULT_FN_ATTRS -_mm_maskload_pd(double const *__p, __m128d __m) +_mm_maskload_pd(double const *__p, __m128i __m) { - return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2df)__m); + return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m); } +/// \brief Conditionally loads double-precision floating point elements from a +/// memory location pointed to by \a __p into a 256-bit vector of +/// [4 x double], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the double-precision +/// floating point values. +/// \param __m +/// A 256-bit integer vector of [4 x quadword] containing the mask. The most +/// significant bit of each quadword element represents the mask bits. If a +/// mask bit is zero, the corresponding value in the memory location is not +/// loaded and the corresponding field in the return value is set to zero. +/// \returns A 256-bit vector of [4 x double] containing the loaded values. static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_maskload_pd(double const *__p, __m256d __m) +_mm256_maskload_pd(double const *__p, __m256i __m) { return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p, - (__v4df)__m); + (__v4di)__m); } +/// \brief Conditionally loads single-precision floating point elements from a +/// memory location pointed to by \a __p into a 128-bit vector of +/// [4 x float], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the single-precision +/// floating point values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each data element represents the mask bits. If a mask bit is zero, the +/// corresponding value in the memory location is not loaded and the +/// corresponding field in the return value is set to zero. +/// \returns A 128-bit vector of [4 x float] containing the loaded values. static __inline __m128 __DEFAULT_FN_ATTRS -_mm_maskload_ps(float const *__p, __m128 __m) +_mm_maskload_ps(float const *__p, __m128i __m) { - return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4sf)__m); + return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m); } +/// \brief Conditionally loads single-precision floating point elements from a +/// memory location pointed to by \a __p into a 256-bit vector of +/// [8 x float], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the single-precision +/// floating point values. 
+/// \param __m +/// A 256-bit integer vector of [8 x dword] containing the mask. The most +/// significant bit of each dword element represents the mask bits. If a mask +/// bit is zero, the corresponding value in the memory location is not loaded +/// and the corresponding field in the return value is set to zero. +/// \returns A 256-bit vector of [8 x float] containing the loaded values. static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_maskload_ps(float const *__p, __m256 __m) +_mm256_maskload_ps(float const *__p, __m256i __m) { - return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8sf)__m); + return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m); } /* Conditional store ops */ +/// \brief Moves single-precision floating point values from a 256-bit vector +/// of [8 x float] to a memory location pointed to by \a __p, according to +/// the specified mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 256-bit integer vector of [8 x dword] containing the mask. The most +/// significant bit of each dword element in the mask vector represents the +/// mask bits. If a mask bit is zero, the corresponding value from vector +/// \a __a is not stored and the corresponding field in the memory location +/// pointed to by \a __p is not changed. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be stored. static __inline void __DEFAULT_FN_ATTRS -_mm256_maskstore_ps(float *__p, __m256 __m, __m256 __a) +_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a) { - __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8sf)__m, (__v8sf)__a); + __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a); } +/// \brief Moves double-precision values from a 128-bit vector of [2 x double] +/// to a memory location pointed to by \a __p, according to the specified +/// mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each field in the mask vector represents the mask bits. If a mask bit is +/// zero, the corresponding value from vector \a __a is not stored and the +/// corresponding field in the memory location pointed to by \a __p is not +/// changed. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be stored. static __inline void __DEFAULT_FN_ATTRS -_mm_maskstore_pd(double *__p, __m128d __m, __m128d __a) +_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a) { - __builtin_ia32_maskstorepd((__v2df *)__p, (__v2df)__m, (__v2df)__a); + __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a); } +/// \brief Moves double-precision values from a 256-bit vector of [4 x double] +/// to a memory location pointed to by \a __p, according to the specified +/// mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 256-bit integer vector of [4 x quadword] containing the mask. The most +/// significant bit of each quadword element in the mask vector represents +/// the mask bits. 
If a mask bit is zero, the corresponding value from vector +/// __a is not stored and the corresponding field in the memory location +/// pointed to by \a __p is not changed. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be stored. static __inline void __DEFAULT_FN_ATTRS -_mm256_maskstore_pd(double *__p, __m256d __m, __m256d __a) +_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a) { - __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4df)__m, (__v4df)__a); + __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a); } +/// \brief Moves single-precision floating point values from a 128-bit vector +/// of [4 x float] to a memory location pointed to by \a __p, according to +/// the specified mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each field in the mask vector represents the mask bits. If a mask bit is +/// zero, the corresponding value from vector __a is not stored and the +/// corresponding field in the memory location pointed to by \a __p is not +/// changed. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. static __inline void __DEFAULT_FN_ATTRS -_mm_maskstore_ps(float *__p, __m128 __m, __m128 __a) +_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a) { - __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4sf)__m, (__v4sf)__a); + __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a); } /* Cacheability support ops */ +/// \brief Moves integer data from a 256-bit integer vector to a 32-byte +/// aligned memory location. To minimize caching, the data is flagged as +/// non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTDQ instruction. +/// +/// \param __a +/// A pointer to a 32-byte aligned memory location that will receive the +/// integer values. +/// \param __b +/// A 256-bit integer vector containing the values to be moved. static __inline void __DEFAULT_FN_ATTRS _mm256_stream_si256(__m256i *__a, __m256i __b) { - __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b); + __builtin_nontemporal_store((__v4di)__b, (__v4di*)__a); } +/// \brief Moves double-precision values from a 256-bit vector of [4 x double] +/// to a 32-byte aligned memory location. To minimize caching, the data is +/// flagged as non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPD instruction. +/// +/// \param __a +/// A pointer to a 32-byte aligned memory location that will receive the +/// integer values. +/// \param __b +/// A 256-bit vector of [4 x double] containing the values to be moved. static __inline void __DEFAULT_FN_ATTRS _mm256_stream_pd(double *__a, __m256d __b) { - __builtin_ia32_movntpd256(__a, (__v4df)__b); + __builtin_nontemporal_store((__v4df)__b, (__v4df*)__a); } +/// \brief Moves single-precision floating point values from a 256-bit vector +/// of [8 x float] to a 32-byte aligned memory location. To minimize +/// caching, the data is flagged as non-temporal (unlikely to be used again +/// soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS instruction. +/// +/// \param __p +/// A pointer to a 32-byte aligned memory location that will receive the +/// single-precision floating point values. 
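Putting the masked load/store pair above together: the sketch below reads and writes back only the selected quadword lanes, leaving the remaining memory untouched (hypothetical helper; assumes count <= 4 and an AVX build; _mm256_setr_epi64x and _mm256_sub_pd come from elsewhere in this header):

#include <immintrin.h>

/* Negates the first `count` doubles of `p` without touching later lanes.
   Active lanes have the quadword sign bit set in the mask, as required by
   VMASKMOVPD. */
static void negate_prefix(double *p, int count)
{
    __m256i mask = _mm256_setr_epi64x(
        count > 0 ? -1LL : 0, count > 1 ? -1LL : 0,
        count > 2 ? -1LL : 0, count > 3 ? -1LL : 0);
    __m256d v = _mm256_maskload_pd(p, mask);          /* masked load  */
    v = _mm256_sub_pd(_mm256_setzero_pd(), v);        /* 0 - v        */
    _mm256_maskstore_pd(p, mask, v);                  /* masked store */
}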
+/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. static __inline void __DEFAULT_FN_ATTRS _mm256_stream_ps(float *__p, __m256 __a) { - __builtin_ia32_movntps256(__p, (__v8sf)__a); + __builtin_nontemporal_store((__v8sf)__a, (__v8sf*)__p); } /* Create vectors */ +/// \brief Create a 256-bit vector of [4 x double] with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit vector of [4 x double] containing undefined values. +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_undefined_pd(void) +{ + return (__m256d)__builtin_ia32_undef256(); +} + +/// \brief Create a 256-bit vector of [8 x float] with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit vector of [8 x float] containing undefined values. +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_undefined_ps(void) +{ + return (__m256)__builtin_ia32_undef256(); +} + +/// \brief Create a 256-bit integer vector with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit integer vector containing undefined values. +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_undefined_si256(void) +{ + return (__m256i)__builtin_ia32_undef256(); +} + +/// \brief Constructs a 256-bit floating-point vector of [4 x double] +/// initialized with the specified double-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A double-precision floating-point value used to initialize bits [255:192] +/// of the result. +/// \param __b +/// A double-precision floating-point value used to initialize bits [191:128] +/// of the result. +/// \param __c +/// A double-precision floating-point value used to initialize bits [127:64] +/// of the result. +/// \param __d +/// A double-precision floating-point value used to initialize bits [63:0] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_set_pd(double __a, double __b, double __c, double __d) { return (__m256d){ __d, __c, __b, __a }; } +/// \brief Constructs a 256-bit floating-point vector of [8 x float] initialized +/// with the specified single-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A single-precision floating-point value used to initialize bits [255:224] +/// of the result. +/// \param __b +/// A single-precision floating-point value used to initialize bits [223:192] +/// of the result. +/// \param __c +/// A single-precision floating-point value used to initialize bits [191:160] +/// of the result. +/// \param __d +/// A single-precision floating-point value used to initialize bits [159:128] +/// of the result. +/// \param __e +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __f +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __g +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __h +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. 
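A usage sketch for the non-temporal store intrinsics documented above (hypothetical helper; assumes `dst` is 32-byte aligned, `n` is a multiple of 8, and that an _mm_sfence from the SSE portion of this header family is used to order the streaming stores):

#include <immintrin.h>

/* Fills a large buffer with a constant without polluting the cache, using the
   non-temporal VMOVNTPS store; the fence orders the stores before any
   consumer runs. */
static void fill_stream(float *dst, int n, float value)
{
    __m256 v = _mm256_set1_ps(value);
    for (int i = 0; i < n; i += 8)
        _mm256_stream_ps(dst + i, v);
    _mm_sfence();
}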
+/// \returns An initialized 256-bit floating-point vector of [8 x float]. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_set_ps(float __a, float __b, float __c, float __d, float __e, float __f, float __g, float __h) @@ -913,6 +3538,31 @@ _mm256_set_ps(float __a, float __b, float __c, float __d, return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a }; } +/// \brief Constructs a 256-bit integer vector initialized with the specified +/// 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [255:224] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [223:192] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [191:160] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [159:128] of the result. +/// \param __i4 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \param __i5 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i6 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i7 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \returns An initialized 256-bit integer vector. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, int __i4, int __i5, int __i6, int __i7) @@ -920,6 +3570,47 @@ _mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 }; } +/// \brief Constructs a 256-bit integer vector initialized with the specified +/// 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w15 +/// A 16-bit integral value used to initialize bits [255:240] of the result. +/// \param __w14 +/// A 16-bit integral value used to initialize bits [239:224] of the result. +/// \param __w13 +/// A 16-bit integral value used to initialize bits [223:208] of the result. +/// \param __w12 +/// A 16-bit integral value used to initialize bits [207:192] of the result. +/// \param __w11 +/// A 16-bit integral value used to initialize bits [191:176] of the result. +/// \param __w10 +/// A 16-bit integral value used to initialize bits [175:160] of the result. +/// \param __w09 +/// A 16-bit integral value used to initialize bits [159:144] of the result. +/// \param __w08 +/// A 16-bit integral value used to initialize bits [143:128] of the result. +/// \param __w07 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \param __w06 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w05 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w04 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w03 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w02 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w01 +/// A 16-bit integral value used to initialize bits [31:16] of the result. +/// \param __w00 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \returns An initialized 256-bit integer vector. 
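Note that the _mm256_set_* constructors take their arguments from the most significant element down, so the first argument lands in the highest lane. A small sketch:

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
    /* First argument initializes bits [255:224]; the last one bits [31:0]. */
    __m256i v = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
    int out[8];
    _mm256_storeu_si256((__m256i *)out, v);
    /* Memory order is lowest element first: prints 0 1 2 3 4 5 6 7 */
    for (int i = 0; i < 8; ++i)
        printf("%d ", out[i]);
    putchar('\n');
    return 0;
}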
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, short __w11, short __w10, short __w09, short __w08, @@ -930,6 +3621,79 @@ _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 }; } +/// \brief Constructs a 256-bit integer vector initialized with the specified +/// 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b31 +/// An 8-bit integral value used to initialize bits [255:248] of the result. +/// \param __b30 +/// An 8-bit integral value used to initialize bits [247:240] of the result. +/// \param __b29 +/// An 8-bit integral value used to initialize bits [239:232] of the result. +/// \param __b28 +/// An 8-bit integral value used to initialize bits [231:224] of the result. +/// \param __b27 +/// An 8-bit integral value used to initialize bits [223:216] of the result. +/// \param __b26 +/// An 8-bit integral value used to initialize bits [215:208] of the result. +/// \param __b25 +/// An 8-bit integral value used to initialize bits [207:200] of the result. +/// \param __b24 +/// An 8-bit integral value used to initialize bits [199:192] of the result. +/// \param __b23 +/// An 8-bit integral value used to initialize bits [191:184] of the result. +/// \param __b22 +/// An 8-bit integral value used to initialize bits [183:176] of the result. +/// \param __b21 +/// An 8-bit integral value used to initialize bits [175:168] of the result. +/// \param __b20 +/// An 8-bit integral value used to initialize bits [167:160] of the result. +/// \param __b19 +/// An 8-bit integral value used to initialize bits [159:152] of the result. +/// \param __b18 +/// An 8-bit integral value used to initialize bits [151:144] of the result. +/// \param __b17 +/// An 8-bit integral value used to initialize bits [143:136] of the result. +/// \param __b16 +/// An 8-bit integral value used to initialize bits [135:128] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b09 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b08 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b07 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b06 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b05 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b04 +/// An 8-bit integral value used to initialize bits [39:32] of the result. +/// \param __b03 +/// An 8-bit integral value used to initialize bits [31:24] of the result. +/// \param __b02 +/// An 8-bit integral value used to initialize bits [23:16] of the result. +/// \param __b01 +/// An 8-bit integral value used to initialize bits [15:8] of the result. 
+/// \param __b00 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \returns An initialized 256-bit integer vector. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28, char __b27, char __b26, char __b25, char __b24, @@ -948,6 +3712,23 @@ _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28, }; } +/// \brief Constructs a 256-bit integer vector initialized with the specified +/// 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A 64-bit integral value used to initialize bits [255:192] of the result. +/// \param __b +/// A 64-bit integral value used to initialize bits [191:128] of the result. +/// \param __c +/// A 64-bit integral value used to initialize bits [127:64] of the result. +/// \param __d +/// A 64-bit integral value used to initialize bits [63:0] of the result. +/// \returns An initialized 256-bit integer vector. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d) { @@ -955,12 +3736,68 @@ _mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d) } /* Create vectors with elements in reverse order */ +/// \brief Constructs a 256-bit floating-point vector of [4 x double], +/// initialized in reverse order with the specified double-precision +/// floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A double-precision floating-point value used to initialize bits [63:0] +/// of the result. +/// \param __b +/// A double-precision floating-point value used to initialize bits [127:64] +/// of the result. +/// \param __c +/// A double-precision floating-point value used to initialize bits [191:128] +/// of the result. +/// \param __d +/// A double-precision floating-point value used to initialize bits [255:192] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setr_pd(double __a, double __b, double __c, double __d) { return (__m256d){ __a, __b, __c, __d }; } +/// \brief Constructs a 256-bit floating-point vector of [8 x float], +/// initialized in reverse order with the specified single-precision +/// float-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \param __b +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __c +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __d +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __e +/// A single-precision floating-point value used to initialize bits [159:128] +/// of the result. +/// \param __f +/// A single-precision floating-point value used to initialize bits [191:160] +/// of the result. +/// \param __g +/// A single-precision floating-point value used to initialize bits [223:192] +/// of the result. +/// \param __h +/// A single-precision floating-point value used to initialize bits [255:224] +/// of the result. 
+/// \returns An initialized 256-bit floating-point vector of [8 x float]. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setr_ps(float __a, float __b, float __c, float __d, float __e, float __f, float __g, float __h) @@ -968,6 +3805,31 @@ _mm256_setr_ps(float __a, float __b, float __c, float __d, return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h }; } +/// \brief Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \param __i4 +/// A 32-bit integral value used to initialize bits [159:128] of the result. +/// \param __i5 +/// A 32-bit integral value used to initialize bits [191:160] of the result. +/// \param __i6 +/// A 32-bit integral value used to initialize bits [223:192] of the result. +/// \param __i7 +/// A 32-bit integral value used to initialize bits [255:224] of the result. +/// \returns An initialized 256-bit integer vector. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3, int __i4, int __i5, int __i6, int __i7) @@ -975,6 +3837,47 @@ _mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3, return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 }; } +/// \brief Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w15 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \param __w14 +/// A 16-bit integral value used to initialize bits [31:16] of the result. +/// \param __w13 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w12 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w11 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w10 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w09 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w08 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \param __w07 +/// A 16-bit integral value used to initialize bits [143:128] of the result. +/// \param __w06 +/// A 16-bit integral value used to initialize bits [159:144] of the result. +/// \param __w05 +/// A 16-bit integral value used to initialize bits [175:160] of the result. +/// \param __w04 +/// A 16-bit integral value used to initialize bits [191:176] of the result. +/// \param __w03 +/// A 16-bit integral value used to initialize bits [207:192] of the result. +/// \param __w02 +/// A 16-bit integral value used to initialize bits [223:208] of the result. +/// \param __w01 +/// A 16-bit integral value used to initialize bits [239:224] of the result. +/// \param __w00 +/// A 16-bit integral value used to initialize bits [255:240] of the result. +/// \returns An initialized 256-bit integer vector. 
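The _mm256_setr_* variants take the same values in memory (lowest-element-first) order, so the two constructors below build identical vectors. A sketch with a hypothetical helper:

#include <immintrin.h>
#include <string.h>

/* Returns 1: `a` and `b` have identical bit patterns. */
static int set_matches_setr(void)
{
    __m256i a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);   /* high ... low */
    __m256i b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);  /* low ... high */
    int x[8], y[8];
    _mm256_storeu_si256((__m256i *)x, a);
    _mm256_storeu_si256((__m256i *)y, b);
    return memcmp(x, y, sizeof x) == 0;
}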
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, short __w11, short __w10, short __w09, short __w08, @@ -985,6 +3888,79 @@ _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 }; } +/// \brief Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b31 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \param __b30 +/// An 8-bit integral value used to initialize bits [15:8] of the result. +/// \param __b29 +/// An 8-bit integral value used to initialize bits [23:16] of the result. +/// \param __b28 +/// An 8-bit integral value used to initialize bits [31:24] of the result. +/// \param __b27 +/// An 8-bit integral value used to initialize bits [39:32] of the result. +/// \param __b26 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b25 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b24 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b23 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b22 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b21 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b20 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b19 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b18 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b17 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b16 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [135:128] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [143:136] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [151:144] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [159:152] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [167:160] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [175:168] of the result. +/// \param __b09 +/// An 8-bit integral value used to initialize bits [183:176] of the result. +/// \param __b08 +/// An 8-bit integral value used to initialize bits [191:184] of the result. +/// \param __b07 +/// An 8-bit integral value used to initialize bits [199:192] of the result. +/// \param __b06 +/// An 8-bit integral value used to initialize bits [207:200] of the result. +/// \param __b05 +/// An 8-bit integral value used to initialize bits [215:208] of the result. +/// \param __b04 +/// An 8-bit integral value used to initialize bits [223:216] of the result. +/// \param __b03 +/// An 8-bit integral value used to initialize bits [231:224] of the result. +/// \param __b02 +/// An 8-bit integral value used to initialize bits [239:232] of the result. +/// \param __b01 +/// An 8-bit integral value used to initialize bits [247:240] of the result. 
+/// \param __b00 +/// An 8-bit integral value used to initialize bits [255:248] of the result. +/// \returns An initialized 256-bit integer vector. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28, char __b27, char __b26, char __b25, char __b24, @@ -1002,6 +3978,23 @@ _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28, __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 }; } +/// \brief Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A 64-bit integral value used to initialize bits [63:0] of the result. +/// \param __b +/// A 64-bit integral value used to initialize bits [127:64] of the result. +/// \param __c +/// A 64-bit integral value used to initialize bits [191:128] of the result. +/// \param __d +/// A 64-bit integral value used to initialize bits [255:192] of the result. +/// \returns An initialized 256-bit integer vector. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d) { @@ -1009,24 +4002,74 @@ _mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d) } /* Create vectors with repeated elements */ +/// \brief Constructs a 256-bit floating-point vector of [4 x double], with each +/// of the four double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_set1_pd(double __w) { return (__m256d){ __w, __w, __w, __w }; } +/// \brief Constructs a 256-bit floating-point vector of [8 x float], with each +/// of the eight single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 +/// instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_set1_ps(float __w) { return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w }; } +/// \brief Constructs a 256-bit integer vector of [8 x i32], with each of the +/// 32-bit integral vector elements set to the specified 32-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 +/// instruction. +/// +/// \param __i +/// A 32-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [8 x i32]. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi32(int __i) { return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i }; } +/// \brief Constructs a 256-bit integer vector of [16 x i16], with each of the +/// 16-bit integral vector elements set to the specified 16-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSHUFB+VINSERTF128 instruction. 
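The _mm256_set1_* splat constructors take the scalar by value, whereas the broadcast intrinsics documented earlier load it through a pointer. A brief sketch of the two equivalent ways to fill a [8 x float] vector:

#include <immintrin.h>

static __m256 splat_by_value(float x)
{
    return _mm256_set1_ps(x);          /* value operand            */
}

static __m256 splat_from_memory(const float *p)
{
    return _mm256_broadcast_ss(p);     /* VBROADCASTSS memory load */
}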
+/// +/// \param __w +/// A 16-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [16 x i16]. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi16(short __w) { @@ -1034,6 +4077,17 @@ _mm256_set1_epi16(short __w) __w, __w, __w, __w, __w, __w }; } +/// \brief Constructs a 256-bit integer vector of [32 x i8], with each of the +/// 8-bit integral vector elements set to the specified 8-bit integral value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSHUFB+VINSERTF128 instruction. +/// +/// \param __b +/// An 8-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [32 x i8]. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi8(char __b) { @@ -1042,6 +4096,18 @@ _mm256_set1_epi8(char __b) __b, __b, __b, __b, __b, __b, __b }; } +/// \brief Constructs a 256-bit integer vector of [4 x i64], with each of the +/// 64-bit integral vector elements set to the specified 64-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. +/// +/// \param __q +/// A 64-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [4 x i64]. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi64x(long long __q) { @@ -1049,18 +4115,41 @@ _mm256_set1_epi64x(long long __q) } /* Create __zeroed vectors */ +/// \brief Constructs a 256-bit floating-point vector of [4 x double] with all +/// vector elements initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit vector of [4 x double] with all elements set to zero. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setzero_pd(void) { return (__m256d){ 0, 0, 0, 0 }; } +/// \brief Constructs a 256-bit floating-point vector of [8 x float] with all +/// vector elements initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit vector of [8 x float] with all elements set to zero. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setzero_ps(void) { return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 }; } +/// \brief Constructs a 256-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit integer vector initialized to zero. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setzero_si256(void) { @@ -1068,86 +4157,256 @@ _mm256_setzero_si256(void) } /* Cast between vector types */ +/// \brief Casts a 256-bit floating-point vector of [4 x double] into a 256-bit +/// floating-point vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 256-bit floating-point vector of [8 x float] containing the same +/// bitwise pattern as the parameter. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castpd_ps(__m256d __a) { return (__m256)__a; } +/// \brief Casts a 256-bit floating-point vector of [4 x double] into a 256-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. 
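The cast intrinsics below only reinterpret bits and emit no instructions, which makes it convenient to build constants in the integer domain and use them on floating-point vectors. A sketch (hypothetical helper; _mm256_and_pd comes from elsewhere in this header):

#include <immintrin.h>

/* Absolute value of four doubles: clear the sign bits with a mask built as a
   [4 x i64] splat and reinterpreted as [4 x double]; the cast is free and
   VANDPD does the bitwise work. */
static __m256d abs_pd(__m256d v)
{
    __m256i mask = _mm256_set1_epi64x(0x7FFFFFFFFFFFFFFFLL);
    return _mm256_and_pd(v, _mm256_castsi256_pd(mask));
}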
+/// \returns A 256-bit integer vector containing the same bitwise pattern as the +/// parameter. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castpd_si256(__m256d __a) { return (__m256i)__a; } +/// \brief Casts a 256-bit floating-point vector of [8 x float] into a 256-bit +/// floating-point vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 256-bit floating-point vector of [4 x double] containing the same +/// bitwise pattern as the parameter. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castps_pd(__m256 __a) { return (__m256d)__a; } +/// \brief Casts a 256-bit floating-point vector of [8 x float] into a 256-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 256-bit integer vector containing the same bitwise pattern as the +/// parameter. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castps_si256(__m256 __a) { return (__m256i)__a; } +/// \brief Casts a 256-bit integer vector into a 256-bit floating-point vector +/// of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit floating-point vector of [8 x float] containing the same +/// bitwise pattern as the parameter. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castsi256_ps(__m256i __a) { return (__m256)__a; } +/// \brief Casts a 256-bit integer vector into a 256-bit floating-point vector +/// of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit floating-point vector of [4 x double] containing the same +/// bitwise pattern as the parameter. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castsi256_pd(__m256i __a) { return (__m256d)__a; } +/// \brief Returns the lower 128 bits of a 256-bit floating-point vector of +/// [4 x double] as a 128-bit floating-point vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 128-bit floating-point vector of [2 x double] containing the +/// lower 128 bits of the parameter. static __inline __m128d __DEFAULT_FN_ATTRS _mm256_castpd256_pd128(__m256d __a) { - return __builtin_shufflevector(__a, __a, 0, 1); + return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1); } +/// \brief Returns the lower 128 bits of a 256-bit floating-point vector of +/// [8 x float] as a 128-bit floating-point vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 128-bit floating-point vector of [4 x float] containing the +/// lower 128 bits of the parameter. static __inline __m128 __DEFAULT_FN_ATTRS _mm256_castps256_ps128(__m256 __a) { - return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3); } +/// \brief Truncates a 256-bit integer vector into a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. 
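One common pattern built on the truncating casts here, together with _mm256_extractf128_ps documented further below, is a horizontal reduction: grab the lower 128 bits for free, extract the upper 128 bits, and finish with 128-bit SSE adds. A sketch (hypothetical helper, AVX build assumed):

#include <immintrin.h>

/* Horizontal sum of a [8 x float] vector. */
static float hsum_ps(__m256 v)
{
    __m128 lo = _mm256_castps256_ps128(v);          /* bits [127:0]          */
    __m128 hi = _mm256_extractf128_ps(v, 1);        /* bits [255:128]        */
    __m128 s  = _mm_add_ps(lo, hi);
    s = _mm_add_ps(s, _mm_movehl_ps(s, s));         /* fold upper pair down  */
    s = _mm_add_ss(s, _mm_shuffle_ps(s, s, 0x55));  /* fold element 1 down   */
    return _mm_cvtss_f32(s);
}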
+/// \returns A 128-bit integer vector containing the lower 128 bits of the +/// parameter. static __inline __m128i __DEFAULT_FN_ATTRS _mm256_castsi256_si128(__m256i __a) { - return __builtin_shufflevector(__a, __a, 0, 1); + return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1); } +/// \brief Constructs a 256-bit floating-point vector of [4 x double] from a +/// 128-bit floating-point vector of [2 x double]. The lower 128 bits +/// contain the value of the source vector. The contents of the upper 128 +/// bits are undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits +/// contain the value of the parameter. The contents of the upper 128 bits +/// are undefined. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castpd128_pd256(__m128d __a) { - return __builtin_shufflevector(__a, __a, 0, 1, -1, -1); + return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1); } +/// \brief Constructs a 256-bit floating-point vector of [8 x float] from a +/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain +/// the value of the source vector. The contents of the upper 128 bits are +/// undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits +/// contain the value of the parameter. The contents of the upper 128 bits +/// are undefined. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castps128_ps256(__m128 __a) { - return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1); } +/// \brief Constructs a 256-bit integer vector from a 128-bit integer vector. +/// The lower 128 bits contain the value of the source vector. The contents +/// of the upper 128 bits are undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 256-bit integer vector. The lower 128 bits contain the value of +/// the parameter. The contents of the upper 128 bits are undefined. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castsi128_si256(__m128i __a) { - return __builtin_shufflevector(__a, __a, 0, 1, -1, -1); + return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1); } -/* +/* Vector insert. We use macros rather than inlines because we only want to accept invocations where the immediate M is a constant expression. */ +/// \brief Constructs a new 256-bit vector of [8 x float] by first duplicating +/// a 256-bit vector of [8 x float] given in the first parameter, and then +/// replacing either the upper or the lower 128 bits with the contents of a +/// 128-bit vector of [4 x float] in the second parameter. The immediate +/// integer parameter determines between the upper or the lower 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_insertf128_ps(__m256 V1, __m128 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x float]. This vector is copied to the result +/// first, and then either the upper or the lower 128 bits of the result will +/// be replaced by the contents of \a V2. 
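Because the widening casts above leave the upper 128 bits undefined, they are usually paired with the insert macro documented here to supply the upper half explicitly; this is exactly the pattern used by _mm256_loadu2_m128 later in this file. A minimal sketch:

#include <immintrin.h>

/* Concatenates two [4 x float] vectors: `lo` lands in bits [127:0], `hi` in
   bits [255:128]. The cast's undefined upper half is immediately overwritten
   by VINSERTF128. */
static __m256 combine_ps(__m128 lo, __m128 hi)
{
    return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 1);
}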
+/// \param V2 +/// A 128-bit vector of [4 x float]. The contents of this parameter are +/// written to either the upper or the lower 128 bits of the result depending +/// on the value of parameter \a M. +/// \param M +/// An immediate integer. The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit vector of [8 x float] containing the interleaved values. #define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \ (__m256)__builtin_shufflevector( \ - (__v8sf)(V1), \ + (__v8sf)(__m256)(V1), \ (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \ (((M) & 1) ? 0 : 8), \ (((M) & 1) ? 1 : 9), \ @@ -1158,148 +4417,475 @@ _mm256_castsi128_si256(__m128i __a) (((M) & 1) ? 10 : 6), \ (((M) & 1) ? 11 : 7) );}) +/// \brief Constructs a new 256-bit vector of [4 x double] by first duplicating +/// a 256-bit vector of [4 x double] given in the first parameter, and then +/// replacing either the upper or the lower 128 bits with the contents of a +/// 128-bit vector of [2 x double] in the second parameter. The immediate +/// integer parameter determines between the upper or the lower 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_insertf128_pd(__m256d V1, __m128d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. This vector is copied to the result +/// first, and then either the upper or the lower 128 bits of the result will +/// be replaced by the contents of \a V2. +/// \param V2 +/// A 128-bit vector of [2 x double]. The contents of this parameter are +/// written to either the upper or the lower 128 bits of the result depending +/// on the value of parameter \a M. +/// \param M +/// An immediate integer. The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. #define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \ (__m256d)__builtin_shufflevector( \ - (__v4df)(V1), \ + (__v4df)(__m256d)(V1), \ (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \ (((M) & 1) ? 0 : 4), \ (((M) & 1) ? 1 : 5), \ (((M) & 1) ? 4 : 2), \ (((M) & 1) ? 5 : 3) );}) +/// \brief Constructs a new 256-bit integer vector by first duplicating a +/// 256-bit integer vector given in the first parameter, and then replacing +/// either the upper or the lower 128 bits with the contents of a 128-bit +/// integer vector in the second parameter. The immediate integer parameter +/// determines between the upper or the lower 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_insertf128_si256(__m256i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit integer vector. 
This vector is copied to the result first, and +/// then either the upper or the lower 128 bits of the result will be +/// replaced by the contents of \a V2. +/// \param V2 +/// A 128-bit integer vector. The contents of this parameter are written to +/// either the upper or the lower 128 bits of the result depending on the +/// value of parameter \a M. +/// \param M +/// An immediate integer. The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit integer vector containing the interleaved values. #define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \ (__m256i)__builtin_shufflevector( \ - (__v4di)(V1), \ + (__v4di)(__m256i)(V1), \ (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \ (((M) & 1) ? 0 : 4), \ (((M) & 1) ? 1 : 5), \ (((M) & 1) ? 4 : 2), \ (((M) & 1) ? 5 : 3) );}) -/* +/* Vector extract. We use macros rather than inlines because we only want to accept invocations where the immediate M is a constant expression. */ +/// \brief Extracts either the upper or the lower 128 bits from a 256-bit vector +/// of [8 x float], as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm256_extractf128_ps(__m256 V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit vector of [4 x float] containing the extracted bits. #define _mm256_extractf128_ps(V, M) __extension__ ({ \ (__m128)__builtin_shufflevector( \ - (__v8sf)(V), \ - (__v8sf)(_mm256_setzero_ps()), \ + (__v8sf)(__m256)(V), \ + (__v8sf)(_mm256_undefined_ps()), \ (((M) & 1) ? 4 : 0), \ (((M) & 1) ? 5 : 1), \ (((M) & 1) ? 6 : 2), \ (((M) & 1) ? 7 : 3) );}) +/// \brief Extracts either the upper or the lower 128 bits from a 256-bit vector +/// of [4 x double], as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm256_extractf128_pd(__m256d V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit vector of [2 x double] containing the extracted bits. #define _mm256_extractf128_pd(V, M) __extension__ ({ \ (__m128d)__builtin_shufflevector( \ - (__v4df)(V), \ - (__v4df)(_mm256_setzero_pd()), \ + (__v4df)(__m256d)(V), \ + (__v4df)(_mm256_undefined_pd()), \ (((M) & 1) ? 
2 : 0), \ (((M) & 1) ? 3 : 1) );}) +/// \brief Extracts either the upper or the lower 128 bits from a 256-bit +/// integer vector, as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit integer vector. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_extractf128_si256(__m256i V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit integer vector. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit integer vector containing the extracted bits. #define _mm256_extractf128_si256(V, M) __extension__ ({ \ (__m128i)__builtin_shufflevector( \ - (__v4di)(V), \ - (__v4di)(_mm256_setzero_si256()), \ + (__v4di)(__m256i)(V), \ + (__v4di)(_mm256_undefined_si256()), \ (((M) & 1) ? 2 : 0), \ (((M) & 1) ? 3 : 1) );}) /* SIMD load ops (unaligned) */ +/// \brief Loads two 128-bit floating-point vectors of [4 x float] from +/// unaligned memory locations and constructs a 256-bit floating-point vector +/// of [8 x float] by concatenating the two 128-bit vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing 4 consecutive +/// single-precision floating-point values. These values are to be copied to +/// bits[255:128] of the result. The address of the memory location does not +/// have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location containing 4 consecutive +/// single-precision floating-point values. These values are to be copied to +/// bits[127:0] of the result. The address of the memory location does not +/// have to be aligned. +/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. static __inline __m256 __DEFAULT_FN_ATTRS _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo) { - struct __loadu_ps { - __m128 __v; - } __attribute__((__packed__, __may_alias__)); - - __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v); - return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1); + __m256 __v256 = _mm256_castps128_ps256(_mm_loadu_ps(__addr_lo)); + return _mm256_insertf128_ps(__v256, _mm_loadu_ps(__addr_hi), 1); } +/// \brief Loads two 128-bit floating-point vectors of [2 x double] from +/// unaligned memory locations and constructs a 256-bit floating-point vector +/// of [4 x double] by concatenating the two 128-bit vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing two consecutive +/// double-precision floating-point values. These values are to be copied to +/// bits[255:128] of the result. The address of the memory location does not +/// have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location containing two consecutive +/// double-precision floating-point values. These values are to be copied to +/// bits[127:0] of the result. The address of the memory location does not +/// have to be aligned. 
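For the extract side, a minimal sketch (illustrative, not part of the patch; split_pd is a made-up helper and requires an AVX-enabled build):

    #include <immintrin.h>

    /* The immediate selects the half to extract: bit 0 clear -> bits [127:0],
     * bit 0 set -> bits [255:128]. */
    static void split_pd(__m256d v, __m128d *lo, __m128d *hi)
    {
        *lo = _mm256_extractf128_pd(v, 0);
        *hi = _mm256_extractf128_pd(v, 1);
    }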
+/// \returns A 256-bit floating-point vector of [4 x double] containing the +/// concatenated result. static __inline __m256d __DEFAULT_FN_ATTRS _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo) { - struct __loadu_pd { - __m128d __v; - } __attribute__((__packed__, __may_alias__)); - - __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v); - return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1); + __m256d __v256 = _mm256_castpd128_pd256(_mm_loadu_pd(__addr_lo)); + return _mm256_insertf128_pd(__v256, _mm_loadu_pd(__addr_hi), 1); } +/// \brief Loads two 128-bit integer vectors from unaligned memory locations and +/// constructs a 256-bit integer vector by concatenating the two 128-bit +/// vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing a 128-bit integer +/// vector. This vector is to be copied to bits[255:128] of the result. The +/// address of the memory location does not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location containing a 128-bit integer +/// vector. This vector is to be copied to bits[127:0] of the result. The +/// address of the memory location does not have to be aligned. +/// \returns A 256-bit integer vector containing the concatenated result. static __inline __m256i __DEFAULT_FN_ATTRS _mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo) { - struct __loadu_si128 { - __m128i __v; - } __attribute__((__packed__, __may_alias__)); - __m256i __v256 = _mm256_castsi128_si256( - ((struct __loadu_si128*)__addr_lo)->__v); - return _mm256_insertf128_si256(__v256, - ((struct __loadu_si128*)__addr_hi)->__v, 1); + __m256i __v256 = _mm256_castsi128_si256(_mm_loadu_si128(__addr_lo)); + return _mm256_insertf128_si256(__v256, _mm_loadu_si128(__addr_hi), 1); } /* SIMD store ops (unaligned) */ +/// \brief Stores the upper and lower 128 bits of a 256-bit floating-point +/// vector of [8 x float] into two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a) { __m128 __v128; __v128 = _mm256_castps256_ps128(__a); - __builtin_ia32_storeups(__addr_lo, __v128); + _mm_storeu_ps(__addr_lo, __v128); __v128 = _mm256_extractf128_ps(__a, 1); - __builtin_ia32_storeups(__addr_hi, __v128); + _mm_storeu_ps(__addr_hi, __v128); } +/// \brief Stores the upper and lower 128 bits of a 256-bit floating-point +/// vector of [4 x double] into two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. 
Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a) { __m128d __v128; __v128 = _mm256_castpd256_pd128(__a); - __builtin_ia32_storeupd(__addr_lo, __v128); + _mm_storeu_pd(__addr_lo, __v128); __v128 = _mm256_extractf128_pd(__a, 1); - __builtin_ia32_storeupd(__addr_hi, __v128); + _mm_storeu_pd(__addr_hi, __v128); } +/// \brief Stores the upper and lower 128 bits of a 256-bit integer vector into +/// two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit integer vector. static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a) { __m128i __v128; __v128 = _mm256_castsi256_si128(__a); - __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128); + _mm_storeu_si128(__addr_lo, __v128); __v128 = _mm256_extractf128_si256(__a, 1); - __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128); + _mm_storeu_si128(__addr_hi, __v128); } +/// \brief Constructs a 256-bit floating-point vector of [8 x float] by +/// concatenating two 128-bit floating-point vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit floating-point vector of [4 x float] to be copied to the upper +/// 128 bits of the result. +/// \param __lo +/// A 128-bit floating-point vector of [4 x float] to be copied to the lower +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_set_m128 (__m128 __hi, __m128 __lo) { - return (__m256) __builtin_shufflevector(__lo, __hi, 0, 1, 2, 3, 4, 5, 6, 7); +_mm256_set_m128 (__m128 __hi, __m128 __lo) +{ + return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7); } +/// \brief Constructs a 256-bit floating-point vector of [4 x double] by +/// concatenating two 128-bit floating-point vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit floating-point vector of [2 x double] to be copied to the upper +/// 128 bits of the result. +/// \param __lo +/// A 128-bit floating-point vector of [2 x double] to be copied to the lower +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [4 x double] containing the +/// concatenated result. 
static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_set_m128d (__m128d __hi, __m128d __lo) { +_mm256_set_m128d (__m128d __hi, __m128d __lo) +{ return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo); } +/// \brief Constructs a 256-bit integer vector by concatenating two 128-bit +/// integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit integer vector to be copied to the upper 128 bits of the +/// result. +/// \param __lo +/// A 128-bit integer vector to be copied to the lower 128 bits of the +/// result. +/// \returns A 256-bit integer vector containing the concatenated result. static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_set_m128i (__m128i __hi, __m128i __lo) { +_mm256_set_m128i (__m128i __hi, __m128i __lo) +{ return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo); } +/// \brief Constructs a 256-bit floating-point vector of [8 x float] by +/// concatenating two 128-bit floating-point vectors of [4 x float]. This is +/// similar to _mm256_set_m128, but the order of the input parameters is +/// swapped. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __lo +/// A 128-bit floating-point vector of [4 x float] to be copied to the lower +/// 128 bits of the result. +/// \param __hi +/// A 128-bit floating-point vector of [4 x float] to be copied to the upper +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_setr_m128 (__m128 __lo, __m128 __hi) { +_mm256_setr_m128 (__m128 __lo, __m128 __hi) +{ return _mm256_set_m128(__hi, __lo); } +/// \brief Constructs a 256-bit floating-point vector of [4 x double] by +/// concatenating two 128-bit floating-point vectors of [2 x double]. This is +/// similar to _mm256_set_m128d, but the order of the input parameters is +/// swapped. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __lo +/// A 128-bit floating-point vector of [2 x double] to be copied to the lower +/// 128 bits of the result. +/// \param __hi +/// A 128-bit floating-point vector of [2 x double] to be copied to the upper +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [4 x double] containing the +/// concatenated result. static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_setr_m128d (__m128d __lo, __m128d __hi) { +_mm256_setr_m128d (__m128d __lo, __m128d __hi) +{ return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo); } +/// \brief Constructs a 256-bit integer vector by concatenating two 128-bit +/// integer vectors. This is similar to _mm256_set_m128i, but the order of +/// the input parameters is swapped. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __lo +/// A 128-bit integer vector to be copied to the lower 128 bits of the +/// result. +/// \param __hi +/// A 128-bit integer vector to be copied to the upper 128 bits of the +/// result. +/// \returns A 256-bit integer vector containing the concatenated result. 
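The loadu2/storeu2 and set_m128 helpers compose naturally; a small round-trip sketch (illustrative, not from the patch; roundtrip is a made-up name, AVX build assumed):

    #include <immintrin.h>

    /* Store the two unaligned halves of a 256-bit vector, then rebuild it.
     * Note the argument order: the store and load take (hi, lo) pointers,
     * _mm256_set_m128 takes (hi, lo) values, and the setr variant (lo, hi). */
    static __m256 roundtrip(__m256 v, float *buf_lo, float *buf_hi)
    {
        _mm256_storeu2_m128(buf_hi, buf_lo, v);   /* bits [255:128] -> buf_hi, [127:0] -> buf_lo */
        return _mm256_loadu2_m128(buf_hi, buf_lo);
    }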
static __inline __m256i __DEFAULT_FN_ATTRS -_mm256_setr_m128i (__m128i __lo, __m128i __hi) { +_mm256_setr_m128i (__m128i __lo, __m128i __hi) +{ return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo); } diff --git a/c_headers/bmi2intrin.h b/c_headers/bmi2intrin.h index c63397c96..fdae82cf2 100644 --- a/c_headers/bmi2intrin.h +++ b/c_headers/bmi2intrin.h @@ -25,15 +25,11 @@ #error "Never use directly; include instead." #endif -#ifndef __BMI2__ -# error "BMI2 instruction set not enabled" -#endif /* __BMI2__ */ - #ifndef __BMI2INTRIN_H #define __BMI2INTRIN_H /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2"))) static __inline__ unsigned int __DEFAULT_FN_ATTRS _bzhi_u32(unsigned int __X, unsigned int __Y) diff --git a/c_headers/bmiintrin.h b/c_headers/bmiintrin.h index 0e93d575c..488eb2dbd 100644 --- a/c_headers/bmiintrin.h +++ b/c_headers/bmiintrin.h @@ -25,30 +25,149 @@ #error "Never use directly; include instead." #endif -#ifndef __BMI__ -# error "BMI instruction set not enabled" -#endif /* __BMI__ */ - #ifndef __BMIINTRIN_H #define __BMIINTRIN_H +/// \brief Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// \code +/// unsigned short _tzcnt_u16(unsigned short a); +/// \endcode +/// +/// This intrinsic corresponds to the TZCNT instruction. +/// +/// \param a +/// An unsigned 16-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 16-bit integer containing the number of trailing zero +/// bits in the operand. #define _tzcnt_u16(a) (__tzcnt_u16((a))) + +/// \brief Performs a bitwise AND of the second operand with the one's +/// complement of the first operand. +/// +/// \headerfile +/// +/// \code +/// unsigned int _andn_u32(unsigned int a, unsigned int b); +/// \endcode +/// +/// This intrinsic corresponds to the ANDN instruction. +/// +/// \param a +/// An unsigned integer containing one of the operands. +/// \param b +/// An unsigned integer containing one of the operands. +/// \returns An unsigned integer containing the bitwise AND of the second +/// operand with the one's complement of the first operand. #define _andn_u32(a, b) (__andn_u32((a), (b))) + /* _bextr_u32 != __bextr_u32 */ +/// \brief Clears all bits in the source except for the least significant bit +/// containing a value of 1 and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned int _blsi_u32(unsigned int a); +/// \endcode +/// +/// This intrinsic corresponds to the BLSI instruction. +/// +/// \param a +/// An unsigned integer whose bits are to be cleared. +/// \returns An unsigned integer containing the result of clearing the bits from +/// the source operand. #define _blsi_u32(a) (__blsi_u32((a))) + +/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and +/// including the least siginificant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned int _blsmsk_u32(unsigned int a); +/// \endcode +/// +/// This intrinsic corresponds to the BLSMSK instruction. +/// +/// \param a +/// An unsigned integer used to create the mask. +/// \returns An unsigned integer containing the newly created mask. #define _blsmsk_u32(a) (__blsmsk_u32((a))) + +/// \brief Clears the least siginificant bit that is set to 1 in the source +/// operand and returns the result. 
+/// +/// \headerfile +/// +/// \code +/// unsigned int _blsr_u32(unsigned int a); +/// \endcode +/// +/// This intrinsic corresponds to the BLSR instruction. +/// +/// \param a +/// An unsigned integer containing the operand to be cleared. +/// \returns An unsigned integer containing the result of clearing the source +/// operand. #define _blsr_u32(a) (__blsr_u32((a))) + +/// \brief Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// \code +/// unsigned int _tzcnt_u32(unsigned int a); +/// \endcode +/// +/// This intrinsic corresponds to the TZCNT instruction. +/// +/// \param a +/// An unsigned 32-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 32-bit integer containing the number of trailing zero +/// bits in the operand. #define _tzcnt_u32(a) (__tzcnt_u32((a))) /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi"))) -static __inline__ unsigned short __DEFAULT_FN_ATTRS +/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT + instruction behaves as BSF on non-BMI targets, there is code that expects + to use it as a potentially faster version of BSF. */ +#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) + +/// \brief Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TZCNT instruction. +/// +/// \param __X +/// An unsigned 16-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 16-bit integer containing the number of trailing zero +/// bits in the operand. +static __inline__ unsigned short __RELAXED_FN_ATTRS __tzcnt_u16(unsigned short __X) { return __X ? __builtin_ctzs(__X) : 16; } +/// \brief Performs a bitwise AND of the second operand with the one's +/// complement of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the ANDN instruction. +/// +/// \param __X +/// An unsigned integer containing one of the operands. +/// \param __Y +/// An unsigned integer containing one of the operands. +/// \returns An unsigned integer containing the bitwise AND of the second +/// operand with the one's complement of the first operand. static __inline__ unsigned int __DEFAULT_FN_ATTRS __andn_u32(unsigned int __X, unsigned int __Y) { @@ -56,6 +175,21 @@ __andn_u32(unsigned int __X, unsigned int __Y) } /* AMD-specified, double-leading-underscore version of BEXTR */ +/// \brief Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BEXTR instruction. +/// +/// \param __X +/// An unsigned integer whose bits are to be extracted. +/// \param __Y +/// An unsigned integer used to specify which bits are extracted. Bits [7:0] +/// specify the index of the least significant bit. Bits [15:8] specify the +/// number of bits to be extracted. +/// \returns An unsigned integer whose least significant bits contain the +/// extracted bits. 
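The BLSI/BLSMSK/BLSR helpers above are implemented with the classic bit identities shown in their bodies; the same expressions work on any unsigned integer even without BMI hardware. A plain-C sketch (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        unsigned x = 0x58u;             /* 0101 1000 */
        unsigned blsi   = x & -x;       /* isolate lowest set bit      -> 0x08 */
        unsigned blsmsk = x ^ (x - 1);  /* mask through lowest set bit -> 0x0f */
        unsigned blsr   = x & (x - 1);  /* clear lowest set bit        -> 0x50 */
        printf("%x %x %x\n", blsi, blsmsk, blsr);
        return 0;
    }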
static __inline__ unsigned int __DEFAULT_FN_ATTRS __bextr_u32(unsigned int __X, unsigned int __Y) { @@ -63,45 +197,214 @@ __bextr_u32(unsigned int __X, unsigned int __Y) } /* Intel-specified, single-leading-underscore version of BEXTR */ +/// \brief Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BEXTR instruction. +/// +/// \param __X +/// An unsigned integer whose bits are to be extracted. +/// \param __Y +/// An unsigned integer used to specify the index of the least significant +/// bit for the bits to be extracted. Bits [7:0] specify the index. +/// \param __Z +/// An unsigned integer used to specify the number of bits to be extracted. +/// Bits [7:0] specify the number of bits. +/// \returns An unsigned integer whose least significant bits contain the +/// extracted bits. static __inline__ unsigned int __DEFAULT_FN_ATTRS _bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z) { return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8))); } +/// \brief Clears all bits in the source except for the least significant bit +/// containing a value of 1 and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BLSI instruction. +/// +/// \param __X +/// An unsigned integer whose bits are to be cleared. +/// \returns An unsigned integer containing the result of clearing the bits from +/// the source operand. static __inline__ unsigned int __DEFAULT_FN_ATTRS __blsi_u32(unsigned int __X) { return __X & -__X; } +/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and +/// including the least siginificant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BLSMSK instruction. +/// +/// \param __X +/// An unsigned integer used to create the mask. +/// \returns An unsigned integer containing the newly created mask. static __inline__ unsigned int __DEFAULT_FN_ATTRS __blsmsk_u32(unsigned int __X) { return __X ^ (__X - 1); } +/// \brief Clears the least siginificant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BLSR instruction. +/// +/// \param __X +/// An unsigned integer containing the operand to be cleared. +/// \returns An unsigned integer containing the result of clearing the source +/// operand. static __inline__ unsigned int __DEFAULT_FN_ATTRS __blsr_u32(unsigned int __X) { return __X & (__X - 1); } -static __inline__ unsigned int __DEFAULT_FN_ATTRS +/// \brief Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 32-bit integer containing the number of trailing zero +/// bits in the operand. +static __inline__ unsigned int __RELAXED_FN_ATTRS __tzcnt_u32(unsigned int __X) { return __X ? __builtin_ctz(__X) : 32; } +/// \brief Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose trailing zeros are to be counted. +/// \returns An 32-bit integer containing the number of trailing zero bits in +/// the operand. 
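The two BEXTR spellings differ only in how the control is passed: the Intel-style _bextr_u32 takes separate start and length arguments, while the AMD-style __bextr_u32 takes them packed as start | (length << 8), exactly as the wrapper above encodes them. A sketch (illustrative; field() is a made-up helper, and the translation unit must be built with BMI enabled, e.g. -mbmi):

    #include <x86intrin.h>

    static unsigned field(unsigned v)
    {
        unsigned a = _bextr_u32(v, 4, 8);            /* bits [11:4] of v */
        unsigned b = __bextr_u32(v, 4u | (8u << 8)); /* same field, packed control */
        return a == b ? a : 0;                       /* the two forms agree */
    }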
+static __inline__ int __RELAXED_FN_ATTRS +_mm_tzcnt_32(unsigned int __X) +{ + return __X ? __builtin_ctz(__X) : 32; +} + #ifdef __x86_64__ +/// \brief Performs a bitwise AND of the second operand with the one's +/// complement of the first operand. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _andn_u64 (unsigned long long a, unsigned long long b); +/// \endcode +/// +/// This intrinsic corresponds to the ANDN instruction. +/// +/// \param a +/// An unsigned 64-bit integer containing one of the operands. +/// \param b +/// An unsigned 64-bit integer containing one of the operands. +/// \returns An unsigned 64-bit integer containing the bitwise AND of the second +/// operand with the one's complement of the first operand. #define _andn_u64(a, b) (__andn_u64((a), (b))) + /* _bextr_u64 != __bextr_u64 */ +/// \brief Clears all bits in the source except for the least significant bit +/// containing a value of 1 and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _blsi_u64(unsigned long long a); +/// \endcode +/// +/// This intrinsic corresponds to the BLSI instruction. +/// +/// \param a +/// An unsigned 64-bit integer whose bits are to be cleared. +/// \returns An unsigned 64-bit integer containing the result of clearing the +/// bits from the source operand. #define _blsi_u64(a) (__blsi_u64((a))) + +/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and +/// including the least siginificant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _blsmsk_u64(unsigned long long a); +/// \endcode +/// +/// This intrinsic corresponds to the BLSMSK instruction. +/// +/// \param a +/// An unsigned 64-bit integer used to create the mask. +/// \returns A unsigned 64-bit integer containing the newly created mask. #define _blsmsk_u64(a) (__blsmsk_u64((a))) + +/// \brief Clears the least siginificant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _blsr_u64(unsigned long long a); +/// \endcode +/// +/// This intrinsic corresponds to the BLSR instruction. +/// +/// \param a +/// An unsigned 64-bit integer containing the operand to be cleared. +/// \returns An unsigned 64-bit integer containing the result of clearing the +/// source operand. #define _blsr_u64(a) (__blsr_u64((a))) + +/// \brief Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _tzcnt_u64(unsigned long long a); +/// \endcode +/// +/// This intrinsic corresponds to the TZCNT instruction. +/// +/// \param a +/// An unsigned 64-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 64-bit integer containing the number of trailing zero +/// bits in the operand. #define _tzcnt_u64(a) (__tzcnt_u64((a))) +/// \brief Performs a bitwise AND of the second operand with the one's +/// complement of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the ANDN instruction. +/// +/// \param __X +/// An unsigned 64-bit integer containing one of the operands. +/// \param __Y +/// An unsigned 64-bit integer containing one of the operands. +/// \returns An unsigned 64-bit integer containing the bitwise AND of the second +/// operand with the one's complement of the first operand. 
static __inline__ unsigned long long __DEFAULT_FN_ATTRS __andn_u64 (unsigned long long __X, unsigned long long __Y) { @@ -109,6 +412,21 @@ __andn_u64 (unsigned long long __X, unsigned long long __Y) } /* AMD-specified, double-leading-underscore version of BEXTR */ +/// \brief Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BEXTR instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose bits are to be extracted. +/// \param __Y +/// An unsigned 64-bit integer used to specify which bits are extracted. Bits +/// [7:0] specify the index of the least significant bit. Bits [15:8] specify +/// the number of bits to be extracted. +/// \returns An unsigned 64-bit integer whose least significant bits contain the +/// extracted bits. static __inline__ unsigned long long __DEFAULT_FN_ATTRS __bextr_u64(unsigned long long __X, unsigned long long __Y) { @@ -116,38 +434,115 @@ __bextr_u64(unsigned long long __X, unsigned long long __Y) } /* Intel-specified, single-leading-underscore version of BEXTR */ +/// \brief Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BEXTR instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose bits are to be extracted. +/// \param __Y +/// An unsigned integer used to specify the index of the least significant +/// bit for the bits to be extracted. Bits [7:0] specify the index. +/// \param __Z +/// An unsigned integer used to specify the number of bits to be extracted. +/// Bits [7:0] specify the number of bits. +/// \returns An unsigned 64-bit integer whose least significant bits contain the +/// extracted bits. static __inline__ unsigned long long __DEFAULT_FN_ATTRS _bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z) { return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8))); } +/// \brief Clears all bits in the source except for the least significant bit +/// containing a value of 1 and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BLSI instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose bits are to be cleared. +/// \returns An unsigned 64-bit integer containing the result of clearing the +/// bits from the source operand. static __inline__ unsigned long long __DEFAULT_FN_ATTRS __blsi_u64(unsigned long long __X) { return __X & -__X; } +/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and +/// including the least siginificant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BLSMSK instruction. +/// +/// \param __X +/// An unsigned 64-bit integer used to create the mask. +/// \returns A unsigned 64-bit integer containing the newly created mask. static __inline__ unsigned long long __DEFAULT_FN_ATTRS __blsmsk_u64(unsigned long long __X) { return __X ^ (__X - 1); } +/// \brief Clears the least siginificant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the BLSR instruction. +/// +/// \param __X +/// An unsigned 64-bit integer containing the operand to be cleared. +/// \returns An unsigned 64-bit integer containing the result of clearing the +/// source operand. 
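Note that the tzcnt wrappers use the relaxed attributes introduced earlier, so they remain callable even when BMI is not enabled, and the zero-input case is explicitly defined to return the operand width (unlike a raw BSF). A sketch (illustrative only; assumes x86-64 so that __tzcnt_u64 is available):

    #include <x86intrin.h>

    static int tzcnt_zero_is_defined(void)
    {
        /* Both comparisons hold: a zero input yields 32 and 64 respectively. */
        return __tzcnt_u32(0) == 32 && __tzcnt_u64(0) == 64;
    }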
static __inline__ unsigned long long __DEFAULT_FN_ATTRS __blsr_u64(unsigned long long __X) { return __X & (__X - 1); } -static __inline__ unsigned long long __DEFAULT_FN_ATTRS +/// \brief Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 64-bit integer containing the number of trailing zero +/// bits in the operand. +static __inline__ unsigned long long __RELAXED_FN_ATTRS __tzcnt_u64(unsigned long long __X) { return __X ? __builtin_ctzll(__X) : 64; } +/// \brief Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose trailing zeros are to be counted. +/// \returns An 64-bit integer containing the number of trailing zero bits in +/// the operand. +static __inline__ long long __RELAXED_FN_ATTRS +_mm_tzcnt_64(unsigned long long __X) +{ + return __X ? __builtin_ctzll(__X) : 64; +} + #endif /* __x86_64__ */ #undef __DEFAULT_FN_ATTRS +#undef __RELAXED_FN_ATTRS #endif /* __BMIINTRIN_H */ diff --git a/c_headers/clflushoptintrin.h b/c_headers/clflushoptintrin.h new file mode 100644 index 000000000..60e0ead76 --- /dev/null +++ b/c_headers/clflushoptintrin.h @@ -0,0 +1,41 @@ +/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __CLFLUSHOPTINTRIN_H +#define __CLFLUSHOPTINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_clflushopt(char * __m) { + __builtin_ia32_clflushopt(__m); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/cpuid.h b/c_headers/cpuid.h index 5da02e0e5..400dcfacd 100644 --- a/c_headers/cpuid.h +++ b/c_headers/cpuid.h @@ -82,6 +82,7 @@ /* Features in %ecx for level 1 */ #define bit_SSE3 0x00000001 #define bit_PCLMULQDQ 0x00000002 +#define bit_PCLMUL bit_PCLMULQDQ /* for gcc compat */ #define bit_DTES64 0x00000004 #define bit_MONITOR 0x00000008 #define bit_DSCPL 0x00000010 @@ -98,15 +99,19 @@ #define bit_PCID 0x00020000 #define bit_DCA 0x00040000 #define bit_SSE41 0x00080000 +#define bit_SSE4_1 bit_SSE41 /* for gcc compat */ #define bit_SSE42 0x00100000 +#define bit_SSE4_2 bit_SSE42 /* for gcc compat */ #define bit_x2APIC 0x00200000 #define bit_MOVBE 0x00400000 #define bit_POPCNT 0x00800000 #define bit_TSCDeadline 0x01000000 #define bit_AESNI 0x02000000 +#define bit_AES bit_AESNI /* for gcc compat */ #define bit_XSAVE 0x04000000 #define bit_OSXSAVE 0x08000000 #define bit_AVX 0x10000000 +#define bit_F16C 0x20000000 #define bit_RDRND 0x40000000 /* Features in %edx for level 1 */ @@ -119,6 +124,7 @@ #define bit_PAE 0x00000040 #define bit_MCE 0x00000080 #define bit_CX8 0x00000100 +#define bit_CMPXCHG8B bit_CX8 /* for gcc compat */ #define bit_APIC 0x00000200 #define bit_SEP 0x00000800 #define bit_MTRR 0x00001000 @@ -133,7 +139,7 @@ #define bit_ACPI 0x00400000 #define bit_MMX 0x00800000 #define bit_FXSR 0x01000000 -#define bit_FXSAVE bit_FXSR /* for gcc compat */ +#define bit_FXSAVE bit_FXSR /* for gcc compat */ #define bit_SSE 0x02000000 #define bit_SSE2 0x04000000 #define bit_SS 0x08000000 diff --git a/c_headers/cuda_wrappers/algorithm b/c_headers/cuda_wrappers/algorithm new file mode 100644 index 000000000..95d9beb73 --- /dev/null +++ b/c_headers/cuda_wrappers/algorithm @@ -0,0 +1,96 @@ +/*===---- complex - CUDA wrapper for ----------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_CUDA_WRAPPERS_ALGORITHM +#define __CLANG_CUDA_WRAPPERS_ALGORITHM + +// This header defines __device__ overloads of std::min/max, but only if we're +// <= C++11. In C++14, these functions are constexpr, and so are implicitly +// __host__ __device__. 
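Returning to the cpuid.h hunk above, the gcc-compat aliases can be used directly with the __get_cpuid helper this header already provides. A sketch (illustrative only, not part of the patch):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))  /* leaf 1: feature flags */
            return 1;
        printf("sse4.1=%d aes=%d f16c=%d\n",
               !!(ecx & bit_SSE4_1), !!(ecx & bit_AES), !!(ecx & bit_F16C));
        return 0;
    }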
+// +// We don't support the initializer_list overloads because +// initializer_list::begin() and end() are not __host__ __device__ functions. +// +// When compiling in C++14 mode, we could force std::min/max to have different +// implementations for host and device, by declaring the device overloads +// before the constexpr overloads appear. We choose not to do this because + +// a) why write our own implementation when we can use one from the standard +// library? and +// b) libstdc++ is evil and declares min/max inside a header that is included +// *before* we include . So we'd have to unconditionally +// declare our __device__ overloads of min/max, but that would pollute +// things for people who choose not to include . + +#include_next + +#if __cplusplus <= 201103L + +// We need to define these overloads in exactly the namespace our standard +// library uses (including the right inline namespace), otherwise they won't be +// picked up by other functions in the standard library (e.g. functions in +// ). Thus the ugliness below. +#ifdef _LIBCPP_BEGIN_NAMESPACE_STD +_LIBCPP_BEGIN_NAMESPACE_STD +#else +namespace std { +#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION +_GLIBCXX_BEGIN_NAMESPACE_VERSION +#endif +#endif + +template +inline __device__ const __T & +max(const __T &__a, const __T &__b, __Cmp __cmp) { + return __cmp(__a, __b) ? __b : __a; +} + +template +inline __device__ const __T & +max(const __T &__a, const __T &__b) { + return __a < __b ? __b : __a; +} + +template +inline __device__ const __T & +min(const __T &__a, const __T &__b, __Cmp __cmp) { + return __cmp(__b, __a) ? __b : __a; +} + +template +inline __device__ const __T & +min(const __T &__a, const __T &__b) { + return __a < __b ? __b : __a; +} + +#ifdef _LIBCPP_END_NAMESPACE_STD +_LIBCPP_END_NAMESPACE_STD +#else +#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION +_GLIBCXX_END_NAMESPACE_VERSION +#endif +} // namespace std +#endif + +#endif // __cplusplus <= 201103L +#endif // __CLANG_CUDA_WRAPPERS_ALGORITHM diff --git a/c_headers/cuda_wrappers/complex b/c_headers/cuda_wrappers/complex new file mode 100644 index 000000000..11d40a82a --- /dev/null +++ b/c_headers/cuda_wrappers/complex @@ -0,0 +1,82 @@ +/*===---- complex - CUDA wrapper for ------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_CUDA_WRAPPERS_COMPLEX +#define __CLANG_CUDA_WRAPPERS_COMPLEX + +// Wrapper around that forces its functions to be __host__ +// __device__. + +// First, include host-only headers we think are likely to be included by +// , so that the pragma below only applies to itself. +#if __cplusplus >= 201103L +#include +#endif +#include +#include +#include + +// Next, include our wrapper, to ensure that device overloads of +// std::min/max are available. +#include + +#pragma clang force_cuda_host_device begin + +// When compiling for device, ask libstdc++ to use its own implements of +// complex functions, rather than calling builtins (which resolve to library +// functions that don't exist when compiling CUDA device code). +// +// This is a little dicey, because it causes libstdc++ to define a different +// set of overloads on host and device. +// +// // Present only when compiling for host. +// __host__ __device__ void complex sin(const complex& x) { +// return __builtin_csinf(x); +// } +// +// // Present when compiling for host and for device. +// template +// void __host__ __device__ complex sin(const complex& x) { +// return complex(sin(x.real()) * cosh(x.imag()), +// cos(x.real()), sinh(x.imag())); +// } +// +// This is safe because when compiling for device, all function calls in +// __host__ code to sin() will still resolve to *something*, even if they don't +// resolve to the same function as they resolve to when compiling for host. We +// don't care that they don't resolve to the right function because we won't +// codegen this host code when compiling for device. + +#pragma push_macro("_GLIBCXX_USE_C99_COMPLEX") +#pragma push_macro("_GLIBCXX_USE_C99_COMPLEX_TR1") +#define _GLIBCXX_USE_C99_COMPLEX 0 +#define _GLIBCXX_USE_C99_COMPLEX_TR1 0 + +#include_next + +#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX_TR1") +#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX") + +#pragma clang force_cuda_host_device end + +#endif // include guard diff --git a/c_headers/cuda_wrappers/new b/c_headers/cuda_wrappers/new new file mode 100644 index 000000000..b77131af0 --- /dev/null +++ b/c_headers/cuda_wrappers/new @@ -0,0 +1,47 @@ +/*===---- complex - CUDA wrapper for ------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CLANG_CUDA_WRAPPERS_NEW +#define __CLANG_CUDA_WRAPPERS_NEW + +#include_next + +// Device overrides for placement new and delete. +#pragma push_macro("CUDA_NOEXCEPT") +#if __cplusplus >= 201103L +#define CUDA_NOEXCEPT noexcept +#else +#define CUDA_NOEXCEPT +#endif + +__device__ inline void *operator new(__SIZE_TYPE__, void *__ptr) CUDA_NOEXCEPT { + return __ptr; +} +__device__ inline void *operator new[](__SIZE_TYPE__, void *__ptr) CUDA_NOEXCEPT { + return __ptr; +} +__device__ inline void operator delete(void *, void *) CUDA_NOEXCEPT {} +__device__ inline void operator delete[](void *, void *) CUDA_NOEXCEPT {} +#pragma pop_macro("CUDA_NOEXCEPT") + +#endif // include guard diff --git a/c_headers/emmintrin.h b/c_headers/emmintrin.h index 656bc19d3..1512f9f0b 100644 --- a/c_headers/emmintrin.h +++ b/c_headers/emmintrin.h @@ -24,10 +24,6 @@ #ifndef __EMMINTRIN_H #define __EMMINTRIN_H -#ifndef __SSE2__ -#error "SSE2 instruction set not enabled" -#else - #include typedef double __m128d __attribute__((__vector_size__(16))); @@ -39,9 +35,35 @@ typedef long long __v2di __attribute__ ((__vector_size__ (16))); typedef short __v8hi __attribute__((__vector_size__(16))); typedef char __v16qi __attribute__((__vector_size__(16))); -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +/* Unsigned types */ +typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16))); +typedef unsigned short __v8hu __attribute__((__vector_size__(16))); +typedef unsigned char __v16qu __attribute__((__vector_size__(16))); +/* We need an explicitly signed variant for char. Note that this shouldn't + * appear in the interface though. */ +typedef signed char __v16qs __attribute__((__vector_size__(16))); + +#include + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"))) + +/// \brief Adds lower double-precision values in both operands and returns the +/// sum in the lower 64 bits of the result. The upper 64 bits of the result +/// are copied from the upper double-precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSD / ADDSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// sum of the lower 64 bits of both operands. The upper 64 bits are copied +/// from the upper 64 bits of the first source operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a, __m128d __b) { @@ -49,12 +71,41 @@ _mm_add_sd(__m128d __a, __m128d __b) return __a; } +/// \brief Adds two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPD / ADDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the sums of both +/// operands. 
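A small sketch of the ADDSD/ADDPD difference documented here (illustrative, not part of the patch; SSE2 is assumed, which is enabled by default on x86-64):

    #include <emmintrin.h>

    static void add_demo(void)
    {
        __m128d a = _mm_set_pd(10.0, 1.0);  /* lanes: low = 1.0, high = 10.0 */
        __m128d b = _mm_set_pd(20.0, 2.0);  /* lanes: low = 2.0, high = 20.0 */
        __m128d full = _mm_add_pd(a, b);    /* { 3.0, 30.0 }: both lanes added */
        __m128d low  = _mm_add_sd(a, b);    /* { 3.0, 10.0 }: high lane taken from a */
        (void)full; (void)low;
    }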
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a, __m128d __b) { - return __a + __b; + return (__m128d)((__v2df)__a + (__v2df)__b); } +/// \brief Subtracts the lower double-precision value of the second operand +/// from the lower double-precision value of the first operand and returns +/// the difference in the lower 64 bits of the result. The upper 64 bits of +/// the result are copied from the upper double-precision value of the first +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBSD / SUBSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the minuend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the subtrahend. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// difference of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a, __m128d __b) { @@ -62,12 +113,40 @@ _mm_sub_sd(__m128d __a, __m128d __b) return __a; } +/// \brief Subtracts two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPD / SUBPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the minuend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the differences between +/// both operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a, __m128d __b) { - return __a - __b; + return (__m128d)((__v2df)__a - (__v2df)__b); } +/// \brief Multiplies lower double-precision values in both operands and returns +/// the product in the lower 64 bits of the result. The upper 64 bits of the +/// result are copied from the upper double-precision value of the first +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULSD / MULSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// product of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a, __m128d __b) { @@ -75,12 +154,41 @@ _mm_mul_sd(__m128d __a, __m128d __b) return __a; } +/// \brief Multiplies two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPD / MULPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the products of both +/// operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a, __m128d __b) { - return __a * __b; + return (__m128d)((__v2df)__a * (__v2df)__b); } +/// \brief Divides the lower double-precision value of the first operand by the +/// lower double-precision value of the second operand and returns the +/// quotient in the lower 64 bits of the result. The upper 64 bits of the +/// result are copied from the upper double-precision value of the first +/// operand. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVSD / DIVSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the dividend. +/// \param __b +/// A 128-bit vector of [2 x double] containing divisor. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// quotient of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a, __m128d __b) { @@ -88,330 +196,1184 @@ _mm_div_sd(__m128d __a, __m128d __b) return __a; } +/// \brief Performs an element-by-element division of two 128-bit vectors of +/// [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPD / DIVPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the dividend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the divisor. +/// \returns A 128-bit vector of [2 x double] containing the quotients of both +/// operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a, __m128d __b) { - return __a / __b; + return (__m128d)((__v2df)__a / (__v2df)__b); } +/// \brief Calculates the square root of the lower double-precision value of +/// the second operand and returns it in the lower 64 bits of the result. +/// The upper 64 bits of the result are copied from the upper double- +/// precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTSD / SQRTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// upper 64 bits of this operand are copied to the upper 64 bits of the +/// result. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// square root is calculated using the lower 64 bits of this operand. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// square root of the lower 64 bits of operand \a __b, and whose upper 64 +/// bits are copied from the upper 64 bits of operand \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a, __m128d __b) { - __m128d __c = __builtin_ia32_sqrtsd(__b); + __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b); return (__m128d) { __c[0], __a[1] }; } +/// \brief Calculates the square root of the each of two values stored in a +/// 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPD / SQRTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [2 x double] containing the square roots of the +/// values in the operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) { - return __builtin_ia32_sqrtpd(__a); + return __builtin_ia32_sqrtpd((__v2df)__a); } +/// \brief Compares lower 64-bit double-precision values of both operands, and +/// returns the lesser of the pair of values in the lower 64-bits of the +/// result. The upper 64 bits of the result are copied from the upper double- +/// precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINSD / MINSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. 
The +/// lower 64 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// minimum value between both operands. The upper 64 bits are copied from +/// the upper 64 bits of the first source operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_minsd(__a, __b); + return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b); } +/// \brief Performs element-by-element comparison of the two 128-bit vectors of +/// [2 x double] and returns the vector containing the lesser of each pair of +/// values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPD / MINPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the minimum values +/// between both operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b) { - return __builtin_ia32_minpd(__a, __b); + return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b); } +/// \brief Compares lower 64-bits double-precision values of both operands, and +/// returns the greater of the pair of values in the lower 64-bits of the +/// result. The upper 64 bits of the result are copied from the upper double- +/// precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXSD / MAXSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// maximum value between both operands. The upper 64 bits are copied from +/// the upper 64 bits of the first source operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_maxsd(__a, __b); + return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b); } +/// \brief Performs element-by-element comparison of the two 128-bit vectors of +/// [2 x double] and returns the vector containing the greater of each pair +/// of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPD / MAXPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the maximum values +/// between both operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b) { - return __builtin_ia32_maxpd(__a, __b); + return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b); } +/// \brief Performs a bitwise AND of two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAND / PAND instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the +/// values between both operands. 
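/* Illustrative usage sketch (not part of this header or patch): the packed
 * arithmetic, square-root, and min/max intrinsics documented above, assuming
 * SSE2 and a C99 compiler. */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128d a = _mm_set_pd(4.0, 9.0);   /* elements {9.0, 4.0}; _mm_set_pd takes hi, lo */
    __m128d b = _mm_set_pd(2.0, 3.0);   /* elements {3.0, 2.0} */
    __m128d sum  = _mm_add_pd(a, b);    /* {12.0, 6.0} */
    __m128d root = _mm_sqrt_pd(a);      /* {3.0, 2.0} */
    __m128d lo   = _mm_min_pd(a, b);    /* {3.0, 2.0} */
    /* _mm_cvtsd_f64 extracts the low element of each result. */
    printf("%f %f %f\n", _mm_cvtsd_f64(sum), _mm_cvtsd_f64(root), _mm_cvtsd_f64(lo));
    return 0;
}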
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a, __m128d __b) { - return (__m128d)((__v4si)__a & (__v4si)__b); + return (__m128d)((__v2du)__a & (__v2du)__b); } +/// \brief Performs a bitwise AND of two 128-bit vectors of [2 x double], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPANDN / PANDN instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 128-bit vector of [2 x double] containing the right source operand. +/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the +/// values in the second operand and the one's complement of the first +/// operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a, __m128d __b) { - return (__m128d)(~(__v4si)__a & (__v4si)__b); + return (__m128d)(~(__v2du)__a & (__v2du)__b); } +/// \brief Performs a bitwise OR of two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPOR / POR instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the +/// values between both operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a, __m128d __b) { - return (__m128d)((__v4si)__a | (__v4si)__b); + return (__m128d)((__v2du)__a | (__v2du)__b); } +/// \brief Performs a bitwise XOR of two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPXOR / PXOR instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the +/// values between both operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a, __m128d __b) { - return (__m128d)((__v4si)__a ^ (__v4si)__b); + return (__m128d)((__v2du)__a ^ (__v2du)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] for equality. Each comparison yields 0h +/// for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQPD / CMPEQPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpeqpd(__a, __b); + return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are less than those in the second operand. Each comparison +/// yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPD / CMPLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. 
+/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpltpd(__a, __b); + return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are less than or equal to those in the second operand. Each +/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPD / CMPLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmplepd(__a, __b); + return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are greater than those in the second operand. Each comparison +/// yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPD / CMPLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpltpd(__b, __a); + return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are greater than or equal to those in the second operand. Each +/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPD / CMPLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmplepd(__b, __a); + return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are ordered with respect to those in the second operand. A pair +/// of double-precision values are "ordered" with respect to each other if +/// neither value is a NaN. Each comparison yields 0h for false, +/// FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDPD / CMPORDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. 
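/* Illustrative sketch (not part of this header or patch): the packed
 * comparisons above return per-element masks of all ones or all zeros rather
 * than booleans, so they are usually combined with the bitwise intrinsics to
 * select between two vectors without branching. Assumes SSE2. */
#include <emmintrin.h>

/* Select a[i] where a[i] > b[i], else b[i], branch-free. */
static __m128d select_greater(__m128d a, __m128d b)
{
    __m128d mask = _mm_cmpgt_pd(a, b);        /* all-ones lanes where a > b */
    return _mm_or_pd(_mm_and_pd(mask, a),     /* keep a where the mask is set   */
                     _mm_andnot_pd(mask, b)); /* keep b where the mask is clear */
}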
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpordpd(__a, __b); + return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are unordered with respect to those in the second operand. A pair +/// of double-precision values are "unordered" with respect to each other if +/// one or both values are NaN. Each comparison yields 0h for false, +/// FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDPD / CMPUNORDPD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpunordpd(__a, __b); + return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are unequal to those in the second operand. Each comparison +/// yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQPD / CMPNEQPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpneqpd(__a, __b); + return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not less than those in the second operand. Each comparison +/// yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPD / CMPNLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpnltpd(__a, __b); + return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not less than or equal to those in the second operand. Each +/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPD / CMPNLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. 
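/* Illustrative sketch (not part of this header or patch): the ordered and
 * unordered predicates documented above make NaN handling explicit; for
 * example, _mm_cmpord_pd builds a mask that can zero out any lane in which
 * either input was NaN. Assumes SSE2. */
#include <emmintrin.h>

/* Replace every lane where a or b is NaN with 0.0, otherwise keep a. */
static __m128d zero_if_nan(__m128d a, __m128d b)
{
    __m128d ok = _mm_cmpord_pd(a, b);   /* all-ones where neither lane is NaN */
    return _mm_and_pd(ok, a);
}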
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpnlepd(__a, __b); + return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not greater than those in the second operand. Each +/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPD / CMPNLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpnltpd(__b, __a); + return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a); } +/// \brief Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not greater than or equal to those in the second operand. +/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPD / CMPNLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpnlepd(__b, __a); + return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. The +/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQSD / CMPEQSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpeqsd(__a, __b); + return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. The comparison yields 0h for false, +/// FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSD / CMPLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. 
The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpltsd(__a, __b); + return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. The comparison yields 0h for +/// false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESD / CMPLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmplesd(__a, __b); + return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. The comparison yields 0h for false, +/// FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSD / CMPLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a, __m128d __b) { - __m128d __c = __builtin_ia32_cmpltsd(__b, __a); + __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a); return (__m128d) { __c[0], __a[1] }; } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. The comparison yields 0h for +/// false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESD / CMPLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. 
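/* Illustrative sketch (not part of this header or patch): unlike their packed
 * counterparts, the scalar *_sd comparisons above only compute a mask in the
 * low 64 bits and pass the upper half of the first operand through unchanged.
 * Assumes SSE2. */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128d a = _mm_set_pd(7.0, 1.0);          /* elements {1.0, 7.0} (lo, hi) */
    __m128d b = _mm_set_pd(9.0, 2.0);          /* elements {2.0, 9.0} */
    __m128d r = _mm_cmplt_sd(a, b);            /* low lane: 1.0 < 2.0 -> all-ones mask */
    double lanes[2];
    _mm_storeu_pd(lanes, r);
    printf("high lane is still %f\n", lanes[1]); /* prints 7.000000, copied from a */
    return 0;
}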
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a, __m128d __b) { - __m128d __c = __builtin_ia32_cmplesd(__b, __a); + __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a); return (__m128d) { __c[0], __a[1] }; } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is "ordered" with respect to the +/// corresponding value in the second parameter. The comparison yields 0h for +/// false, FFFFFFFFFFFFFFFFh for true. A pair of double-precision values are +/// "ordered" with respect to each other if neither value is a NaN. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDSD / CMPORDSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpordsd(__a, __b); + return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is "unordered" with respect to the +/// corresponding value in the second parameter. The comparison yields 0h +/// for false, FFFFFFFFFFFFFFFFh for true. A pair of double-precision values +/// are "unordered" with respect to each other if one or both values are NaN. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDSD / CMPUNORDSD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpunordsd(__a, __b); + return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. The comparison yields 0h for false, +/// FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQSD / CMPNEQSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. 
The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpneqsd(__a, __b); + return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not less than the corresponding +/// value in the second parameter. The comparison yields 0h for false, +/// FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSD / CMPNLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpnltsd(__a, __b); + return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not less than or equal to the +/// corresponding value in the second parameter. The comparison yields 0h +/// for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESD / CMPNLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a, __m128d __b) { - return (__m128d)__builtin_ia32_cmpnlesd(__a, __b); + return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not greater than the corresponding +/// value in the second parameter. The comparison yields 0h for false, +/// FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSD / CMPNLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. 
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a, __m128d __b) { - __m128d __c = __builtin_ia32_cmpnltsd(__b, __a); + __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a); return (__m128d) { __c[0], __a[1] }; } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not greater than or equal to the +/// corresponding value in the second parameter. The comparison yields 0h +/// for false, FFFFFFFFFFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESD / CMPNLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, __m128d __b) { - __m128d __c = __builtin_ia32_cmpnlesd(__b, __a); + __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a); return (__m128d) { __c[0], __a[1] }; } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. The +/// comparison yields 0 for false, 1 for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_comisdeq(__a, __b); + return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. The comparison yields 0 for false, 1 for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_comisdlt(__a, __b); + return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. The comparison yields 0 for +/// false, 1 for true. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_comisdle(__a, __b); + return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. The comparison yields 0 for false, 1 for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_comisdgt(__a, __b); + return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. The comparison yields 0 for +/// false, 1 for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_comisdge(__a, __b); + return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. The comparison yields 0 for false, 1 for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. 
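/* Illustrative sketch (not part of this header or patch): the _mm_comi*_sd
 * family documented above returns a plain int (0 or 1) rather than a mask,
 * which makes it the natural choice for ordinary control flow on the low
 * elements. Assumes SSE2. */
#include <emmintrin.h>

/* Return the smaller of the two low elements as a scalar double. */
static double min_low(__m128d a, __m128d b)
{
    return _mm_comilt_sd(a, b) ? _mm_cvtsd_f64(a) : _mm_cvtsd_f64(b);
}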
static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_comisdneq(__a, __b); + return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. The +/// comparison yields 0 for false, 1 for true. If either of the two lower +/// double-precision values is NaN, 1 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 1 is returned. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_ucomisdeq(__a, __b); + return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. The comparison yields 0 for false, 1 for true. If +/// either of the two lower double-precision values is NaN, 1 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 1 is returned. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_ucomisdlt(__a, __b); + return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. The comparison yields 0 for +/// false, 1 for true. If either of the two lower double-precision values is +/// NaN, 1 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 1 is returned. 
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_ucomisdle(__a, __b); + return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. The comparison yields 0 for false, 1 for true. +/// If either of the two lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_ucomisdgt(__a, __b); + return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. The comparison yields 0 for +/// false, 1 for true. If either of the two lower double-precision values +/// is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. If either of the two +/// lower double-precision values is NaN, 0 is returned. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_ucomisdge(__a, __b); + return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b); } +/// \brief Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. The comparison yields 0 for false, 1 for true. If +/// either of the two lower double-precision values is NaN, 0 is returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison result. If either of the two +/// lower double-precision values is NaN, 0 is returned. 
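/* Illustrative sketch (not part of this header or patch): as the notes above
 * spell out, the unordered _mm_ucomi*_sd compares differ from _mm_comi*_sd
 * mainly in what they return when a NaN is present, which matters for
 * comparator-style code. Assumes SSE2. */
#include <emmintrin.h>

/* qsort-style comparator on the low elements of two [2 x double] vectors.
 * Per the documentation above, the unordered less-than returns 1 when either
 * operand is NaN, so this comparator is only well-behaved for NaN-free data. */
static int cmp_low(__m128d a, __m128d b)
{
    if (_mm_ucomilt_sd(a, b)) return -1;
    if (_mm_ucomilt_sd(b, a)) return  1;
    return 0;
}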
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a, __m128d __b) { - return __builtin_ia32_ucomisdneq(__a, __b); + return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b); } +/// \brief Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two single-precision floating-point +/// values, returned in the lower 64 bits of a 128-bit vector of [4 x float]. +/// The upper 64 bits of the result vector are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2PS / CVTPD2PS instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a) { - return __builtin_ia32_cvtpd2ps(__a); + return __builtin_ia32_cvtpd2ps((__v2df)__a); } +/// \brief Converts the lower two single-precision floating-point elements of a +/// 128-bit vector of [4 x float] into two double-precision floating-point +/// values, returned in a 128-bit vector of [2 x double]. The upper two +/// elements of the input vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2PD / CVTPS2PD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower two single-precision +/// floating-point elements are converted to double-precision values. The +/// upper two elements are unused. +/// \returns A 128-bit vector of [2 x double] containing the converted values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a) { - return __builtin_ia32_cvtps2pd(__a); + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df); } +/// \brief Converts the lower two integer elements of a 128-bit vector of +/// [4 x i32] into two double-precision floating-point values, returned in a +/// 128-bit vector of [2 x double]. The upper two elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PD / CVTDQ2PD instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. The lower two integer elements are +/// converted to double-precision values. The upper two elements are unused. +/// \returns A 128-bit vector of [2 x double] containing the converted values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) { - return __builtin_ia32_cvtdq2pd((__v4si)__a); + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df); } +/// \brief Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper +/// 64 bits of the result vector are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2DQ / CVTPD2DQ instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) { - return __builtin_ia32_cvtpd2dq(__a); + return __builtin_ia32_cvtpd2dq((__v2df)__a); } +/// \brief Converts the low-order element of a 128-bit vector of [2 x double] +/// into a 32-bit signed integer value. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 32-bit signed integer containing the converted value. static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a) { - return __builtin_ia32_cvtsd2si(__a); + return __builtin_ia32_cvtsd2si((__v2df)__a); } +/// \brief Converts the lower double-precision floating-point element of a +/// 128-bit vector of [2 x double], in the second parameter, into a +/// single-precision floating-point value, returned in the lower 32 bits of a +/// 128-bit vector of [4 x float]. The upper 96 bits of the result vector are +/// copied from the upper 96 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SS / CVTSD2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are +/// copied to the upper 96 bits of the result. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision +/// floating-point element is used in the conversion. +/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the +/// converted value from the second parameter. The upper 96 bits are copied +/// from the upper 96 bits of the first parameter. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a, __m128d __b) { - __a[0] = __b[0]; - return __a; + return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b); } +/// \brief Converts a 32-bit signed integer value, in the second parameter, into +/// a double-precision floating-point value, returned in the lower 64 bits of +/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector +/// are copied from the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SD / CVTSI2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are +/// copied to the upper 64 bits of the result. +/// \param __b +/// A 32-bit signed integer containing the value to be converted. +/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the +/// converted value from the second parameter. The upper 64 bits are copied +/// from the upper 64 bits of the first parameter. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a, int __b) { @@ -419,6 +1381,25 @@ _mm_cvtsi32_sd(__m128d __a, int __b) return __a; } +/// \brief Converts the lower single-precision floating-point element of a +/// 128-bit vector of [4 x float], in the second parameter, into a +/// double-precision floating-point value, returned in the lower 64 bits of +/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector +/// are copied from the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SD / CVTSS2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are +/// copied to the upper 64 bits of the result. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower single-precision +/// floating-point element is used in the conversion. +/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the +/// converted value from the second parameter. The upper 64 bits are copied +/// from the upper 64 bits of the first parameter. 
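/* Illustrative sketch (not part of this header or patch): the scalar
 * conversions documented above round according to the current MXCSR rounding
 * mode (round-to-nearest-even by default). Assumes SSE2; _mm_setzero_ps and
 * _mm_cvtss_f32 come from the SSE header pulled in by this one. */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128d d = _mm_set_sd(2.75);
    int i = _mm_cvtsd_si32(d);                          /* 3 under round-to-nearest */
    __m128 f = _mm_cvtsd_ss(_mm_setzero_ps(), d);       /* low float = 2.75f */
    __m128d back = _mm_cvtsi32_sd(_mm_setzero_pd(), i); /* low double = 3.0 */
    printf("%d %f %f\n", i, _mm_cvtss_f32(f), _mm_cvtsd_f64(back));
    return 0;
}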
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a, __m128 __b) { @@ -426,48 +1407,145 @@ _mm_cvtss_sd(__m128d __a, __m128 __b) return __a; } +/// \brief Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. If the +/// result of either conversion is inexact, the result is truncated (rounded +/// towards zero) regardless of the current MXCSR setting. The upper 64 bits +/// of the result vector are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPD2DQ / CVTTPD2DQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) { - return (__m128i)__builtin_ia32_cvttpd2dq(__a); + return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a); } +/// \brief Converts the low-order element of a [2 x double] vector into a 32-bit +/// signed integer value, truncating the result when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSD2SI / CVTTSD2SI +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 32-bit signed integer containing the converted value. static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) { - return __a[0]; + return __builtin_ia32_cvttsd2si((__v2df)__a); } +/// \brief Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPD2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 64-bit vector of [2 x i32] containing the converted values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtpd_pi32(__m128d __a) { - return (__m64)__builtin_ia32_cvtpd2pi(__a); + return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a); } +/// \brief Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in a 64-bit vector of [2 x i32]. If the result of either +/// conversion is inexact, the result is truncated (rounded towards zero) +/// regardless of the current MXCSR setting. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPD2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 64-bit vector of [2 x i32] containing the converted values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvttpd_pi32(__m128d __a) { - return (__m64)__builtin_ia32_cvttpd2pi(__a); + return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a); } +/// \brief Converts the two signed 32-bit integer elements of a 64-bit vector of +/// [2 x i32] into two double-precision floating-point values, returned in a +/// 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. +/// \returns A 128-bit vector of [2 x double] containing the converted values. 
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtpi32_pd(__m64 __a) { return __builtin_ia32_cvtpi2pd((__v2si)__a); } +/// \brief Returns the low-order element of a 128-bit vector of [2 x double] as +/// a double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are returned. +/// \returns A double-precision floating-point value copied from the lower 64 +/// bits of \a __a. static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a) { return __a[0]; } +/// \brief Loads a 128-bit floating-point vector of [2 x double] from an aligned +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 16-byte aligned. +/// \returns A 128-bit vector of [2 x double] containing the loaded values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp) { return *(__m128d*)__dp; } +/// \brief Loads a double-precision floating-point value from a specified memory +/// location and duplicates it to both vector elements of a 128-bit vector of +/// [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP / MOVDDUP instruction. +/// +/// \param __dp +/// A pointer to a memory location containing a double-precision value. +/// \returns A 128-bit vector of [2 x double] containing the loaded and +/// duplicated values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp) { @@ -480,13 +1558,38 @@ _mm_load1_pd(double const *__dp) #define _mm_load_pd1(dp) _mm_load1_pd(dp) +/// \brief Loads two double-precision values, in reverse order, from an aligned +/// memory location into a 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction + +/// needed shuffling instructions. In AVX mode, the shuffling may be combined +/// with the \c VMOVAPD, resulting in only a \c VPERMILPD instruction. +/// +/// \param __dp +/// A 16-byte aligned pointer to an array of double-precision values to be +/// loaded in reverse order. +/// \returns A 128-bit vector of [2 x double] containing the reversed loaded +/// values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp) { __m128d __u = *(__m128d*)__dp; - return __builtin_shufflevector(__u, __u, 1, 0); + return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0); } +/// \brief Loads a 128-bit floating-point vector of [2 x double] from an +/// unaligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD / MOVUPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [2 x double] containing the loaded values. 
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp) { @@ -496,6 +1599,16 @@ _mm_loadu_pd(double const *__dp) return ((struct __loadu_pd*)__dp)->__v; } +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_loadu_si64(void const *__a) +{ + struct __loadu_si64 { + long long __v; + } __attribute__((__packed__, __may_alias__)); + long long __u = ((struct __loadu_si64*)__a)->__v; + return (__m128i){__u, 0L}; +} + static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp) { @@ -506,6 +1619,23 @@ _mm_load_sd(double const *__dp) return (__m128d){ __u, 0 }; } +/// \brief Loads a double-precision value into the high-order bits of a 128-bit +/// vector of [2 x double]. The low-order bits are copied from the low-order +/// bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the result. +/// \param __dp +/// A pointer to a 64-bit memory location containing a double-precision +/// floating-point value that is loaded. The loaded value is written to bits +/// [127:64] of the result. The address of the memory location does not have +/// to be aligned. +/// \returns A 128-bit vector of [2 x double] containing the moved values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a, double const *__dp) { @@ -516,6 +1646,23 @@ _mm_loadh_pd(__m128d __a, double const *__dp) return (__m128d){ __a[0], __u }; } +/// \brief Loads a double-precision value into the low-order bits of a 128-bit +/// vector of [2 x double]. The high-order bits are copied from the +/// high-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the result. +/// \param __dp +/// A pointer to a 64-bit memory location containing a double-precision +/// floating-point value that is loaded. The loaded value is written to bits +/// [63:0] of the result. The address of the memory location does not have to +/// be aligned. +/// \returns A 128-bit vector of [2 x double] containing the moved values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a, double const *__dp) { @@ -526,42 +1673,149 @@ _mm_loadl_pd(__m128d __a, double const *__dp) return (__m128d){ __u, __a[1] }; } +/// \brief Constructs a 128-bit floating-point vector of [2 x double] with +/// unspecified content. This could be used as an argument to another +/// intrinsic function where the argument is required but the value is not +/// actually used. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 128-bit floating-point vector of [2 x double] with unspecified +/// content. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_undefined_pd(void) +{ + return (__m128d)__builtin_ia32_undef128(); +} + +/// \brief Constructs a 128-bit floating-point vector of [2 x double]. The lower +/// 64 bits of the vector are initialized with the specified double-precision +/// floating-point value. The upper 64 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. 
The +/// lower 64 bits contain the value of the parameter. The upper 64 bits are +/// set to zero. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) { return (__m128d){ __w, 0 }; } +/// \brief Constructs a 128-bit floating-point vector of [2 x double], with each +/// of the two double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP / MOVLHPS instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w) { return (__m128d){ __w, __w }; } +/// \brief Constructs a 128-bit floating-point vector of [2 x double] +/// initialized with the specified double-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize the upper 64 +/// bits of the result. +/// \param __x +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w, double __x) { return (__m128d){ __x, __w }; } +/// \brief Constructs a 128-bit floating-point vector of [2 x double], +/// initialized in reverse order with the specified double-precision +/// floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \param __x +/// A double-precision floating-point value used to initialize the upper 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w, double __x) { return (__m128d){ __w, __x }; } +/// \brief Constructs a 128-bit floating-point vector of [2 x double] +/// initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instruction. +/// +/// \returns An initialized 128-bit floating-point vector of [2 x double] with +/// all elements set to zero. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void) { return (__m128d){ 0, 0 }; } +/// \brief Constructs a 128-bit floating-point vector of [2 x double]. The lower +/// 64 bits are set to the lower 64 bits of the second parameter. The upper +/// 64 bits are set to the upper 64 bits of the first parameter. +// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDPD / BLENDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits are written to the +/// upper 64 bits of the result. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower 64 bits are written to the +/// lower 64 bits of the result. +/// \returns A 128-bit vector of [2 x double] containing the moved values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a, __m128d __b) { return (__m128d){ __b[0], __a[1] }; } +/// \brief Stores the lower 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSD / MOVSD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp, __m128d __a) { @@ -572,34 +1826,89 @@ _mm_store_sd(double *__dp, __m128d __a) } static __inline__ void __DEFAULT_FN_ATTRS -_mm_store1_pd(double *__dp, __m128d __a) +_mm_store_pd(double *__dp, __m128d __a) { - struct __mm_store1_pd_struct { - double __u[2]; - } __attribute__((__packed__, __may_alias__)); - ((struct __mm_store1_pd_struct*)__dp)->__u[0] = __a[0]; - ((struct __mm_store1_pd_struct*)__dp)->__u[1] = __a[0]; + *(__m128d*)__dp = __a; } static __inline__ void __DEFAULT_FN_ATTRS -_mm_store_pd(double *__dp, __m128d __a) +_mm_store1_pd(double *__dp, __m128d __a) { - *(__m128d *)__dp = __a; + __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0); + _mm_store_pd(__dp, __a); } +/// \brief Stores a 128-bit vector of [2 x double] into an aligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 16-byte aligned. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_pd1(double *__dp, __m128d __a) +{ + return _mm_store1_pd(__dp, __a); +} + +/// \brief Stores a 128-bit vector of [2 x double] into an unaligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD / MOVUPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, __m128d __a) { - __builtin_ia32_storeupd(__dp, __a); + struct __storeu_pd { + __m128d __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__dp)->__v = __a; } +/// \brief Stores two double-precision values, in reverse order, from a 128-bit +/// vector of [2 x double] to a 16-byte aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to a shuffling instruction followed by a +/// VMOVAPD / MOVAPD instruction. +/// +/// \param __dp +/// A pointer to a 16-byte aligned memory location that can store two +/// double-precision values. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be reversed and +/// stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp, __m128d __a) { - __a = __builtin_shufflevector(__a, __a, 1, 0); + __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0); *(__m128d *)__dp = __a; } +/// \brief Stores the upper 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. 
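A minimal usage sketch of the set/store intrinsics documented above, assuming an SSE2 host; note that _mm_set_pd takes the high element first while _mm_setr_pd takes its arguments in memory order (variable names are illustrative only):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128d a = _mm_set_pd(2.0, 1.0);   /* a[0] = 1.0, a[1] = 2.0 */
    __m128d b = _mm_setr_pd(1.0, 2.0);  /* b[0] = 1.0, b[1] = 2.0 */
    double out_a[2], out_b[2];
    _mm_storeu_pd(out_a, a);            /* unaligned store, no alignment requirement */
    _mm_storeu_pd(out_b, b);
    /* both print "1.000000 2.000000" */
    printf("%f %f\n", out_a[0], out_a[1]);
    printf("%f %f\n", out_b[0], out_b[1]);
    return 0;
}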
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp, __m128d __a) { @@ -609,6 +1918,17 @@ _mm_storeh_pd(double *__dp, __m128d __a) ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1]; } +/// \brief Stores the lower 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp, __m128d __a) { @@ -618,409 +1938,1266 @@ _mm_storel_pd(double *__dp, __m128d __a) ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0]; } +/// \brief Adds the corresponding elements of two 128-bit vectors of [16 x i8], +/// saving the lower 8 bits of each sum in the corresponding element of a +/// 128-bit result vector of [16 x i8]. The integer elements of both +/// parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDB / PADDB instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// \param __b +/// A 128-bit vector of [16 x i8]. +/// \returns A 128-bit vector of [16 x i8] containing the sums of both +/// parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a, __m128i __b) { - return (__m128i)((__v16qi)__a + (__v16qi)__b); + return (__m128i)((__v16qu)__a + (__v16qu)__b); } +/// \brief Adds the corresponding elements of two 128-bit vectors of [8 x i16], +/// saving the lower 16 bits of each sum in the corresponding element of a +/// 128-bit result vector of [8 x i16]. The integer elements of both +/// parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDW / PADDW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// \returns A 128-bit vector of [8 x i16] containing the sums of both +/// parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a, __m128i __b) { - return (__m128i)((__v8hi)__a + (__v8hi)__b); + return (__m128i)((__v8hu)__a + (__v8hu)__b); } +/// \brief Adds the corresponding elements of two 128-bit vectors of [4 x i32], +/// saving the lower 32 bits of each sum in the corresponding element of a +/// 128-bit result vector of [4 x i32]. The integer elements of both +/// parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDD / PADDD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. +/// \param __b +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit vector of [4 x i32] containing the sums of both +/// parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a, __m128i __b) { - return (__m128i)((__v4si)__a + (__v4si)__b); + return (__m128i)((__v4su)__a + (__v4su)__b); } +/// \brief Adds two signed or unsigned 64-bit integer values, returning the +/// lower 64 bits of the sum. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDQ instruction. +/// +/// \param __a +/// A 64-bit integer. +/// \param __b +/// A 64-bit integer. +/// \returns A 64-bit integer containing the sum of both parameters. 
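A short sketch of the element-wise adds documented above. The switch to unsigned vector types (__v16qu and friends) in the implementations presumably keeps the wraparound well defined rather than relying on signed overflow; the helpers _mm_set1_epi8 and _mm_storeu_si128 are declared elsewhere in this header:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set1_epi8((char)250);
    __m128i b = _mm_set1_epi8(10);
    __m128i sum = _mm_add_epi8(a, b);      /* each byte: (250 + 10) & 0xFF = 4 */
    unsigned char out[16];
    _mm_storeu_si128((__m128i *)out, sum);
    printf("%d\n", out[0]);                /* prints 4 */
    return 0;
}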
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_si64(__m64 __a, __m64 __b) { - return __a + __b; + return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b); } +/// \brief Adds the corresponding elements of two 128-bit vectors of [2 x i64], +/// saving the lower 64 bits of each sum in the corresponding element of a +/// 128-bit result vector of [2 x i64]. The integer elements of both +/// parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDQ / PADDQ instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. +/// \param __b +/// A 128-bit vector of [2 x i64]. +/// \returns A 128-bit vector of [2 x i64] containing the sums of both +/// parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a, __m128i __b) { - return __a + __b; + return (__m128i)((__v2du)__a + (__v2du)__b); } +/// \brief Adds, with saturation, the corresponding elements of two 128-bit +/// signed [16 x i8] vectors, saving each sum in the corresponding element of +/// a 128-bit result vector of [16 x i8]. Positive sums greater than 7Fh are +/// saturated to 7Fh. Negative sums less than 80h are saturated to 80h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDSB / PADDSB instruction. +/// +/// \param __a +/// A 128-bit signed [16 x i8] vector. +/// \param __b +/// A 128-bit signed [16 x i8] vector. +/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of +/// both parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b); } +/// \brief Adds, with saturation, the corresponding elements of two 128-bit +/// signed [8 x i16] vectors, saving each sum in the corresponding element of +/// a 128-bit result vector of [8 x i16]. Positive sums greater than 7FFFh +/// are saturated to 7FFFh. Negative sums less than 8000h are saturated to +/// 8000h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDSW / PADDSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of +/// both parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Adds, with saturation, the corresponding elements of two 128-bit +/// unsigned [16 x i8] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [16 x i8]. Positive sums greater than FFh +/// are saturated to FFh. Negative sums are saturated to 00h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDUSB / PADDUSB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums +/// of both parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b); } +/// \brief Adds, with saturation, the corresponding elements of two 128-bit +/// unsigned [8 x i16] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [8 x i16]. Positive sums greater than FFFFh +/// are saturated to FFFFh. Negative sums are saturated to 0000h. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDUSW / PADDUSW instruction. +/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums +/// of both parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Computes the rounded averages of corresponding elements of two +/// 128-bit unsigned [16 x i8] vectors, saving each result in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAVGB / PAVGB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded +/// averages of both parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b); } +/// \brief Computes the rounded averages of corresponding elements of two +/// 128-bit unsigned [8 x i16] vectors, saving each result in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAVGW / PAVGW instruction. +/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded +/// averages of both parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Multiplies the corresponding elements of two 128-bit signed [8 x i16] +/// vectors, producing eight intermediate 32-bit signed integer products, and +/// adds the consecutive pairs of 32-bit products to form a 128-bit signed +/// [4 x i32] vector. For example, bits [15:0] of both parameters are +/// multiplied producing a 32-bit product, bits [31:16] of both parameters +/// are multiplied producing a 32-bit product, and the sum of those two +/// products becomes bits [31:0] of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMADDWD / PMADDWD instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [4 x i32] vector containing the sums of products +/// of both parameters. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b); } +/// \brief Compares corresponding elements of two 128-bit signed [8 x i16] +/// vectors, saving the greater value from each comparison in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXSW / PMAXSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the greater value of +/// each comparison.
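A worked sketch of the pairwise multiply-accumulate performed by _mm_madd_epi16, as described above (values chosen arbitrarily; _mm_setr_epi16 is declared elsewhere in this header):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    /* lane 0 = 1*5 + 2*6 = 17, lane 1 = 3*7 + 4*8 = 53, and so on */
    __m128i a = _mm_setr_epi16(1, 2, 3, 4, 1, 2, 3, 4);
    __m128i b = _mm_setr_epi16(5, 6, 7, 8, 5, 6, 7, 8);
    __m128i dp = _mm_madd_epi16(a, b);
    int out[4];
    _mm_storeu_si128((__m128i *)out, dp);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 17 53 17 53 */
    return 0;
}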
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Compares corresponding elements of two 128-bit unsigned [16 x i8] +/// vectors, saving the greater value from each comparison in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXUB / PMAXUB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of +/// each comparison. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b); } +/// \brief Compares corresponding elements of two 128-bit signed [8 x i16] +/// vectors, saving the smaller value from each comparison in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINSW / PMINSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of +/// each comparison. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Compares corresponding elements of two 128-bit unsigned [16 x i8] +/// vectors, saving the smaller value from each comparison in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINUB / PMINUB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of +/// each comparison. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b); } +/// \brief Multiplies the corresponding elements of two signed [8 x i16] +/// vectors, saving the upper 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit signed [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULHW / PMULHW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of +/// each of the eight 32-bit products. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Multiplies the corresponding elements of two unsigned [8 x i16] +/// vectors, saving the upper 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit unsigned [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULHUW / PMULHUW instruction. +/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits +/// of each of the eight 32-bit products. 
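A small sketch of the usual clamp idiom built from the min/max intrinsics documented above, assuming an SSE2 host:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    /* Clamp eight signed 16-bit values to the range [0, 255]. */
    __m128i v  = _mm_setr_epi16(-5, 300, 42, 1000, -32000, 255, 0, 7);
    __m128i lo = _mm_set1_epi16(0);
    __m128i hi = _mm_set1_epi16(255);
    __m128i clamped = _mm_min_epi16(_mm_max_epi16(v, lo), hi);
    short out[8];
    _mm_storeu_si128((__m128i *)out, clamped);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 0 255 42 255 */
    return 0;
}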
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Multiplies the corresponding elements of two signed [8 x i16] +/// vectors, saving the lower 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit signed [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULLW / PMULLW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of +/// each of the eight 32-bit products. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a, __m128i __b) { - return (__m128i)((__v8hi)__a * (__v8hi)__b); + return (__m128i)((__v8hu)__a * (__v8hu)__b); } +/// \brief Multiplies 32-bit unsigned integer values contained in the lower bits +/// of the two 64-bit integer vectors and returns the 64-bit unsigned +/// product. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULUDQ instruction. +/// +/// \param __a +/// A 64-bit integer containing one of the source operands. +/// \param __b +/// A 64-bit integer containing one of the source operands. +/// \returns A 64-bit integer vector containing the product of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_mul_su32(__m64 __a, __m64 __b) { return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b); } +/// \brief Multiplies 32-bit unsigned integer values contained in the lower +/// bits of the corresponding elements of two [2 x i64] vectors, and returns +/// the 64-bit products in the corresponding elements of a [2 x i64] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULUDQ / PMULUDQ instruction. +/// +/// \param __a +/// A [2 x i64] vector containing one of the source operands. +/// \param __b +/// A [2 x i64] vector containing one of the source operands. +/// \returns A [2 x i64] vector containing the product of both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a, __m128i __b) { return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b); } +/// \brief Computes the absolute differences of corresponding 8-bit integer +/// values in two 128-bit vectors. Sums the first 8 absolute differences, and +/// separately sums the second 8 absolute differences. Packs these two +/// unsigned 16-bit integer sums into the upper and lower elements of a +/// [2 x i64] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSADBW / PSADBW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A [2 x i64] vector containing the sums of the sets of absolute +/// differences between both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a, __m128i __b) { return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b); } +/// \brief Subtracts the corresponding 8-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBB / PSUBB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands.
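A sketch of _mm_sad_epu8 as documented above: each 64-bit half of the result holds the 16-bit sum of eight absolute byte differences (assuming an SSE2 host; names are illustrative):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set1_epi8(9);
    __m128i b = _mm_set1_epi8(3);
    __m128i sad = _mm_sad_epu8(a, b);      /* each half: 8 * |9 - 3| = 48 */
    long long out[2];
    _mm_storeu_si128((__m128i *)out, sad);
    printf("%lld %lld\n", out[0], out[1]); /* prints 48 48 */
    return 0;
}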
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a, __m128i __b) { - return (__m128i)((__v16qi)__a - (__v16qi)__b); + return (__m128i)((__v16qu)__a - (__v16qu)__b); } +/// \brief Subtracts the corresponding 16-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBW / PSUBW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a, __m128i __b) { - return (__m128i)((__v8hi)__a - (__v8hi)__b); + return (__m128i)((__v8hu)__a - (__v8hu)__b); } +/// \brief Subtracts the corresponding 32-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBD / PSUBD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a, __m128i __b) { - return (__m128i)((__v4si)__a - (__v4si)__b); + return (__m128i)((__v4su)__a - (__v4su)__b); } +/// \brief Subtracts signed or unsigned 64-bit integer values and writes the +/// difference to the corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBQ instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the minuend. +/// \param __b +/// A 64-bit integer vector containing the subtrahend. +/// \returns A 64-bit integer vector containing the difference of the values in +/// the operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_si64(__m64 __a, __m64 __b) { - return __a - __b; + return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b); } +/// \brief Subtracts the corresponding elements of two [2 x i64] vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBQ / PSUBQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a, __m128i __b) { - return __a - __b; + return (__m128i)((__v2du)__a - (__v2du)__b); } +/// \brief Subtracts corresponding 8-bit signed integer values in the input and +/// returns the differences in the corresponding bytes in the destination. +/// Differences greater than 7Fh are saturated to 7Fh, and differences less +/// than 80h are saturated to 80h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBSB / PSUBSB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b); } +/// \brief Subtracts corresponding 16-bit signed integer values in the input and +/// returns the differences in the corresponding bytes in the destination. 
+/// Differences greater than 7FFFh are saturated to 7FFFh, and values less +/// than 8000h are saturated to 8000h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBSW / PSUBSW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Subtracts corresponding 8-bit unsigned integer values in the input +/// and returns the differences in the corresponding bytes in the +/// destination. Differences less than 00h are saturated to 00h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBUSB / PSUBUSB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the unsigned integer +/// differences of the values in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b); } +/// \brief Subtracts corresponding 16-bit unsigned integer values in the input +/// and returns the differences in the corresponding bytes in the +/// destination. Differences less than 0000h are saturated to 0000h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBUSW / PSUBUSW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the unsigned integer +/// differences of the values in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Performs a bitwise AND of two 128-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAND / PAND instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A 128-bit integer vector containing the bitwise AND of the values +/// in both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, __m128i __b) { - return __a & __b; + return (__m128i)((__v2du)__a & (__v2du)__b); } +/// \brief Performs a bitwise AND of two 128-bit integer vectors, using the +/// one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPANDN / PANDN instruction. +/// +/// \param __a +/// A 128-bit vector containing the left source operand. The one's complement +/// of this value is used in the bitwise AND. +/// \param __b +/// A 128-bit vector containing the right source operand. +/// \returns A 128-bit integer vector containing the bitwise AND of the one's +/// complement of the first operand and the values in the second operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a, __m128i __b) { - return ~__a & __b; + return (__m128i)(~(__v2du)__a & (__v2du)__b); } - +/// \brief Performs a bitwise OR of two 128-bit integer vectors. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPOR / POR instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A 128-bit integer vector containing the bitwise OR of the values +/// in both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a, __m128i __b) { - return __a | __b; + return (__m128i)((__v2du)__a | (__v2du)__b); } +/// \brief Performs a bitwise exclusive OR of two 128-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPXOR / PXOR instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the +/// values in both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a, __m128i __b) { - return __a ^ __b; + return (__m128i)((__v2du)__a ^ (__v2du)__b); } -#define _mm_slli_si128(a, imm) __extension__ ({ \ - (__m128i)__builtin_shufflevector((__v16qi)_mm_setzero_si128(), \ - (__v16qi)(__m128i)(a), \ - ((imm)&0xF0) ? 0 : 16 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 17 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 18 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 19 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 20 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 21 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 22 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 23 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 24 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 25 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 26 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 27 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 28 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 29 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 30 - ((imm)&0xF), \ - ((imm)&0xF0) ? 0 : 31 - ((imm)&0xF)); }) +/// \brief Left-shifts the 128-bit integer vector operand by the specified +/// number of bytes. Low-order bits are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_slli_si128(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSLLDQ / PSLLDQ instruction. +/// +/// \param a +/// A 128-bit integer vector containing the source operand. +/// \param imm +/// An immediate value specifying the number of bytes to left-shift operand +/// \a a. +/// \returns A 128-bit integer vector containing the left-shifted value. +#define _mm_slli_si128(a, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector( \ + (__v16qi)_mm_setzero_si128(), \ + (__v16qi)(__m128i)(a), \ + ((char)(imm)&0xF0) ? 0 : 16 - (char)(imm), \ + ((char)(imm)&0xF0) ? 1 : 17 - (char)(imm), \ + ((char)(imm)&0xF0) ? 2 : 18 - (char)(imm), \ + ((char)(imm)&0xF0) ? 3 : 19 - (char)(imm), \ + ((char)(imm)&0xF0) ? 4 : 20 - (char)(imm), \ + ((char)(imm)&0xF0) ? 5 : 21 - (char)(imm), \ + ((char)(imm)&0xF0) ? 6 : 22 - (char)(imm), \ + ((char)(imm)&0xF0) ? 7 : 23 - (char)(imm), \ + ((char)(imm)&0xF0) ? 8 : 24 - (char)(imm), \ + ((char)(imm)&0xF0) ? 9 : 25 - (char)(imm), \ + ((char)(imm)&0xF0) ? 10 : 26 - (char)(imm), \ + ((char)(imm)&0xF0) ? 11 : 27 - (char)(imm), \ + ((char)(imm)&0xF0) ? 12 : 28 - (char)(imm), \ + ((char)(imm)&0xF0) ? 13 : 29 - (char)(imm), \ + ((char)(imm)&0xF0) ? 14 : 30 - (char)(imm), \ + ((char)(imm)&0xF0) ? 
15 : 31 - (char)(imm)); }) #define _mm_bslli_si128(a, imm) \ _mm_slli_si128((a), (imm)) +/// \brief Left-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLW / PSLLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a, int __count) { return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count); } +/// \brief Left-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLW / PSLLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count); } +/// \brief Left-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLD / PSLLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a, int __count) { return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count); } +/// \brief Left-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLD / PSLLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count); } +/// \brief Left-shifts each 64-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLQ / PSLLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. 
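A usage sketch for the _mm_slli_si128 byte-shift macro above; the byte count has to be an integer constant expression, because the macro expands to a compile-time shuffle:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_setr_epi32(0x11111111, 0x22222222, 0x33333333, 0x44444444);
    __m128i s = _mm_slli_si128(v, 4);      /* shift the whole vector left by 4 bytes */
    unsigned int out[4];
    _mm_storeu_si128((__m128i *)out, s);
    /* prints 0 11111111 22222222 33333333 */
    printf("%x %x %x %x\n", out[0], out[1], out[2], out[3]);
    return 0;
}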
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a, int __count) { - return __builtin_ia32_psllqi128(__a, __count); + return __builtin_ia32_psllqi128((__v2di)__a, __count); } +/// \brief Left-shifts each 64-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLQ / PSLLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, __m128i __count) { - return __builtin_ia32_psllq128(__a, __count); + return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count); } +/// \brief Right-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAW / PSRAW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a, int __count) { return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count); } +/// \brief Right-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAW / PSRAW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count); } +/// \brief Right-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAD / PSRAD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a, int __count) { return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count); } +/// \brief Right-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAD / PSRAD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. 
+/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count); } -#define _mm_srli_si128(a, imm) __extension__ ({ \ - (__m128i)__builtin_shufflevector((__v16qi)(__m128i)(a), \ - (__v16qi)_mm_setzero_si128(), \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 0, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 1, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 2, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 3, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 4, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 5, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 6, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 7, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 8, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 9, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 10, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 11, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 12, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 13, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 14, \ - ((imm)&0xF0) ? 16 : ((imm)&0xF) + 15); }) +/// \brief Right-shifts the 128-bit integer vector operand by the specified +/// number of bytes. High-order bits are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_srli_si128(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSRLDQ / PSRLDQ instruction. +/// +/// \param a +/// A 128-bit integer vector containing the source operand. +/// \param imm +/// An immediate value specifying the number of bytes to right-shift operand +/// \a a. +/// \returns A 128-bit integer vector containing the right-shifted value. +#define _mm_srli_si128(a, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector( \ + (__v16qi)(__m128i)(a), \ + (__v16qi)_mm_setzero_si128(), \ + ((char)(imm)&0xF0) ? 16 : (char)(imm) + 0, \ + ((char)(imm)&0xF0) ? 17 : (char)(imm) + 1, \ + ((char)(imm)&0xF0) ? 18 : (char)(imm) + 2, \ + ((char)(imm)&0xF0) ? 19 : (char)(imm) + 3, \ + ((char)(imm)&0xF0) ? 20 : (char)(imm) + 4, \ + ((char)(imm)&0xF0) ? 21 : (char)(imm) + 5, \ + ((char)(imm)&0xF0) ? 22 : (char)(imm) + 6, \ + ((char)(imm)&0xF0) ? 23 : (char)(imm) + 7, \ + ((char)(imm)&0xF0) ? 24 : (char)(imm) + 8, \ + ((char)(imm)&0xF0) ? 25 : (char)(imm) + 9, \ + ((char)(imm)&0xF0) ? 26 : (char)(imm) + 10, \ + ((char)(imm)&0xF0) ? 27 : (char)(imm) + 11, \ + ((char)(imm)&0xF0) ? 28 : (char)(imm) + 12, \ + ((char)(imm)&0xF0) ? 29 : (char)(imm) + 13, \ + ((char)(imm)&0xF0) ? 30 : (char)(imm) + 14, \ + ((char)(imm)&0xF0) ? 31 : (char)(imm) + 15); }) #define _mm_bsrli_si128(a, imm) \ _mm_srli_si128((a), (imm)) +/// \brief Right-shifts each of 16-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLW / PSRLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a, int __count) { return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count); } +/// \brief Right-shifts each of 16-bit values in the 128-bit integer vector +/// operand by the specified number of bits. 
High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLW / PSRLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count); } +/// \brief Right-shifts each of 32-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLD / PSRLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a, int __count) { return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count); } +/// \brief Right-shifts each of 32-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLD / PSRLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, __m128i __count) { return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count); } +/// \brief Right-shifts each of 64-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLQ / PSRLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a, int __count) { - return __builtin_ia32_psrlqi128(__a, __count); + return __builtin_ia32_psrlqi128((__v2di)__a, __count); } +/// \brief Right-shifts each of 64-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLQ / PSRLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. 
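A sketch contrasting the arithmetic (sign-filling) and logical (zero-filling) right shifts documented above:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_set1_epi16(-8);            /* each lane is 0xFFF8 */
    __m128i arith = _mm_srai_epi16(v, 2);      /* sign-extending: -8 >> 2 = -2        */
    __m128i logic = _mm_srli_epi16(v, 2);      /* zero-filling: 0xFFF8 >> 2 = 0x3FFE  */
    short a_out[8], l_out[8];
    _mm_storeu_si128((__m128i *)a_out, arith);
    _mm_storeu_si128((__m128i *)l_out, logic);
    printf("%d %d\n", a_out[0], l_out[0]);     /* prints -2 16382 */
    return 0;
}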
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, __m128i __count) { - return __builtin_ia32_psrlq128(__a, __count); + return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count); } +/// \brief Compares each of the corresponding 8-bit values of the 128-bit +/// integer vectors for equality. Each comparison yields 0h for false, FFh +/// for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQB / PCMPEQB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a, __m128i __b) { return (__m128i)((__v16qi)__a == (__v16qi)__b); } +/// \brief Compares each of the corresponding 16-bit values of the 128-bit +/// integer vectors for equality. Each comparison yields 0h for false, FFFFh +/// for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQW / PCMPEQW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a, __m128i __b) { return (__m128i)((__v8hi)__a == (__v8hi)__b); } +/// \brief Compares each of the corresponding 32-bit values of the 128-bit +/// integer vectors for equality. Each comparison yields 0h for false, +/// FFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQD / PCMPEQD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a, __m128i __b) { return (__m128i)((__v4si)__a == (__v4si)__b); } +/// \brief Compares each of the corresponding signed 8-bit values of the 128-bit +/// integer vectors to determine if the values in the first operand are +/// greater than those in the second operand. Each comparison yields 0h for +/// false, FFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTB / PCMPGTB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a, __m128i __b) { /* This function always performs a signed comparison, but __v16qi is a char - which may be signed or unsigned. */ - typedef signed char __v16qs __attribute__((__vector_size__(16))); + which may be signed or unsigned, so use __v16qs. */ return (__m128i)((__v16qs)__a > (__v16qs)__b); } +/// \brief Compares each of the corresponding signed 16-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are greater than those in the second operand. Each comparison yields 0h +/// for false, FFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTW / PCMPGTW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. 
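A sketch of the common branchless-select idiom: the all-ones / all-zero masks produced by the comparison intrinsics combine directly with the bitwise AND/ANDNOT/OR intrinsics from earlier in this hunk (here computing a per-lane signed maximum of 32-bit values):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_setr_epi32(1, 50, -3, 40);
    __m128i b = _mm_setr_epi32(9, 20, -7, 80);
    __m128i mask = _mm_cmpgt_epi32(a, b);           /* lane-wise a > b */
    __m128i max  = _mm_or_si128(_mm_and_si128(mask, a),
                                _mm_andnot_si128(mask, b));
    int out[4];
    _mm_storeu_si128((__m128i *)out, max);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 9 50 -3 80 */
    return 0;
}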
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a, __m128i __b) { return (__m128i)((__v8hi)__a > (__v8hi)__b); } +/// \brief Compares each of the corresponding signed 32-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are greater than those in the second operand. Each comparison yields 0h +/// for false, FFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTD / PCMPGTD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a, __m128i __b) { return (__m128i)((__v4si)__a > (__v4si)__b); } +/// \brief Compares each of the corresponding signed 8-bit values of the 128-bit +/// integer vectors to determine if the values in the first operand are less +/// than those in the second operand. Each comparison yields 0h for false, +/// FFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTB / PCMPGTB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a, __m128i __b) { return _mm_cmpgt_epi8(__b, __a); } +/// \brief Compares each of the corresponding signed 16-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are less than those in the second operand. Each comparison yields 0h for +/// false, FFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTW / PCMPGTW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a, __m128i __b) { return _mm_cmpgt_epi16(__b, __a); } +/// \brief Compares each of the corresponding signed 32-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are less than those in the second operand. Each comparison yields 0h for +/// false, FFFFFFFFh for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTD / PCMPGTD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a, __m128i __b) { @@ -1028,6 +3205,23 @@ _mm_cmplt_epi32(__m128i __a, __m128i __b) } #ifdef __x86_64__ +/// \brief Converts a 64-bit signed integer value from the second operand into a +/// double-precision value and returns it in the lower element of a [2 x +/// double] vector; the upper element of the returned vector is copied from +/// the upper element of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SD / CVTSI2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this operand are +/// copied to the upper 64 bits of the destination. +/// \param __b +/// A 64-bit signed integer operand containing the value to be converted. 
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// converted value of the second operand. The upper 64 bits are copied from +/// the upper 64 bits of the first operand. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a, long long __b) { @@ -1035,37 +3229,100 @@ _mm_cvtsi64_sd(__m128d __a, long long __b) return __a; } +/// \brief Converts the first (lower) element of a vector of [2 x double] into a +/// 64-bit signed integer value, according to the current rounding mode. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 64-bit signed integer containing the converted value. static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) { - return __builtin_ia32_cvtsd2si64(__a); + return __builtin_ia32_cvtsd2si64((__v2df)__a); } +/// \brief Converts the first (lower) element of a vector of [2 x double] into a +/// 64-bit signed integer value, truncating the result when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSD2SI / CVTTSD2SI +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 64-bit signed integer containing the converted value. static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttsd_si64(__m128d __a) { - return __a[0]; + return __builtin_ia32_cvttsd2si64((__v2df)__a); } #endif +/// \brief Converts a vector of [4 x i32] into a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PS / CVTDQ2PS instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 128-bit vector of [4 x float] containing the converted values. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) { return __builtin_ia32_cvtdq2ps((__v4si)__a); } +/// \brief Converts a vector of [4 x float] into a vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2DQ / CVTPS2DQ instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit integer vector of [4 x i32] containing the converted +/// values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) { - return (__m128i)__builtin_ia32_cvtps2dq(__a); + return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a); } +/// \brief Converts a vector of [4 x float] into a vector of [4 x i32], +/// truncating the result when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPS2DQ / CVTTPS2DQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x i32] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a) { - return (__m128i)__builtin_ia32_cvttps2dq(__a); + return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a); } +/// \brief Returns a vector of [4 x i32] where the lowest element is the input +/// operand and the remaining elements are zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __a +/// A 32-bit signed integer operand. +/// \returns A 128-bit vector of [4 x i32]. 
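A sketch contrasting _mm_cvtps_epi32 (current rounding mode, round-to-nearest-even by default) with _mm_cvttps_epi32 (truncation toward zero), as documented above; _mm_setr_ps comes from xmmintrin.h, which this header includes:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128 f = _mm_setr_ps(1.5f, 2.5f, -1.5f, 3.7f);
    __m128i rounded   = _mm_cvtps_epi32(f);
    __m128i truncated = _mm_cvttps_epi32(f);
    int r[4], t[4];
    _mm_storeu_si128((__m128i *)r, rounded);
    _mm_storeu_si128((__m128i *)t, truncated);
    printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);  /* 2 2 -2 4 */
    printf("%d %d %d %d\n", t[0], t[1], t[2], t[3]);  /* 1 2 -1 3 */
    return 0;
}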
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) { @@ -1073,6 +3330,16 @@ _mm_cvtsi32_si128(int __a) } #ifdef __x86_64__ +/// \brief Returns a vector of [2 x i64] where the lower element is the input +/// operand and the upper element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A 64-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [2 x i64] containing the converted value. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) { @@ -1080,6 +3347,17 @@ _mm_cvtsi64_si128(long long __a) } #endif +/// \brief Moves the least significant 32 bits of a vector of [4 x i32] to a +/// 32-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __a +/// A vector of [4 x i32]. The least significant 32 bits are moved to the +/// destination. +/// \returns A 32-bit signed integer containing the moved value. static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) { @@ -1088,6 +3366,17 @@ _mm_cvtsi128_si32(__m128i __a) } #ifdef __x86_64__ +/// \brief Moves the least significant 64 bits of a vector of [2 x i64] to a +/// 64-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A vector of [2 x i64]. The least significant 64 bits are moved to the +/// destination. +/// \returns A 64-bit signed integer containing the moved value. static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a) { @@ -1095,12 +3384,32 @@ _mm_cvtsi128_si64(__m128i __a) } #endif +/// \brief Moves packed integer values from an aligned 128-bit memory location +/// to elements in a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA / MOVDQA instruction. +/// +/// \param __p +/// An aligned pointer to a memory location containing integer values. +/// \returns A 128-bit integer vector containing the moved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_load_si128(__m128i const *__p) { return *__p; } +/// \brief Moves packed integer values from an unaligned 128-bit memory location +/// to elements in a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU / MOVDQU instruction. +/// +/// \param __p +/// A pointer to a memory location containing integer values. +/// \returns A 128-bit integer vector containing the moved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si128(__m128i const *__p) { @@ -1110,6 +3419,18 @@ _mm_loadu_si128(__m128i const *__p) return ((struct __loadu_si128*)__p)->__v; } +/// \brief Returns a vector of [2 x i64] where the lower element is taken from +/// the lower element of the operand, and the upper element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __p +/// A 128-bit vector of [2 x i64]. Bits [63:0] are written to bits [63:0] of +/// the destination. +/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the +/// moved value. The higher order bits are cleared. 
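/*
 * Usage sketch (illustrative only; not part of the vendored clang header or
 * of this patch).  Assumes an SSE2 target and a C11 compiler (for _Alignas);
 * it exercises the aligned/unaligned loads and the scalar <-> vector moves
 * documented above.
 */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    /* _mm_load_si128 requires 16-byte alignment; _mm_loadu_si128 does not. */
    _Alignas(16) int aligned[4] = { 1, 2, 3, 4 };
    int unaligned[5] = { 0, 10, 20, 30, 40 };

    __m128i a = _mm_load_si128((const __m128i *)aligned);
    __m128i u = _mm_loadu_si128((const __m128i *)(unaligned + 1));

    /* _mm_cvtsi32_si128 places 7 in the low lane and zeroes the rest;
     * _mm_cvtsi128_si32 reads the low lane back out. */
    __m128i low = _mm_cvtsi32_si128(7);

    printf("%d %d %d\n",
           _mm_cvtsi128_si32(a),     /* 1  */
           _mm_cvtsi128_si32(u),     /* 10 */
           _mm_cvtsi128_si32(low));  /* 7  */
    return 0;
}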
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadl_epi64(__m128i const *__p) { @@ -1119,114 +3440,488 @@ _mm_loadl_epi64(__m128i const *__p) return (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0}; } +/// \brief Generates a 128-bit vector of [4 x i32] with unspecified content. +/// This could be used as an argument to another intrinsic function where the +/// argument is required but the value is not actually used. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 128-bit vector of [4 x i32] with unspecified content. static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_set_epi64x(long long q1, long long q0) +_mm_undefined_si128(void) { - return (__m128i){ q0, q1 }; + return (__m128i)__builtin_ia32_undef128(); } +/// \brief Initializes both 64-bit values in a 128-bit vector of [2 x i64] with +/// the specified 64-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q1 +/// A 64-bit integer value used to initialize the upper 64 bits of the +/// destination vector of [2 x i64]. +/// \param __q0 +/// A 64-bit integer value used to initialize the lower 64 bits of the +/// destination vector of [2 x i64]. +/// \returns An initialized 128-bit vector of [2 x i64] containing the values +/// provided in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_set_epi64(__m64 q1, __m64 q0) +_mm_set_epi64x(long long __q1, long long __q0) { - return (__m128i){ (long long)q0, (long long)q1 }; + return (__m128i){ __q0, __q1 }; } +/// \brief Initializes both 64-bit values in a 128-bit vector of [2 x i64] with +/// the specified 64-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q1 +/// A 64-bit integer value used to initialize the upper 64 bits of the +/// destination vector of [2 x i64]. +/// \param __q0 +/// A 64-bit integer value used to initialize the lower 64 bits of the +/// destination vector of [2 x i64]. +/// \returns An initialized 128-bit vector of [2 x i64] containing the values +/// provided in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_set_epi32(int i3, int i2, int i1, int i0) +_mm_set_epi64(__m64 __q1, __m64 __q0) { - return (__m128i)(__v4si){ i0, i1, i2, i3}; + return (__m128i){ (long long)__q0, (long long)__q1 }; } +/// \brief Initializes the 32-bit values in a 128-bit vector of [4 x i32] with +/// the specified 32-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i3 +/// A 32-bit integer value used to initialize bits [127:96] of the +/// destination vector. +/// \param __i2 +/// A 32-bit integer value used to initialize bits [95:64] of the destination +/// vector. +/// \param __i1 +/// A 32-bit integer value used to initialize bits [63:32] of the destination +/// vector. +/// \param __i0 +/// A 32-bit integer value used to initialize bits [31:0] of the destination +/// vector. +/// \returns An initialized 128-bit vector of [4 x i32] containing the values +/// provided in the operands. 
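/*
 * Usage sketch (illustrative only; not part of the vendored clang header or
 * of this patch).  It highlights the argument order of _mm_set_epi64x,
 * documented above: the first argument becomes the upper 64 bits, the second
 * the lower 64 bits.  Assumes an SSE2-capable x86_64 target (for
 * _mm_cvtsi128_si64); _mm_unpackhi_epi64 is documented further down in this
 * header.
 */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i v = _mm_set_epi64x(0x1111111111111111LL,   /* upper lane */
                               0x2222222222222222LL);  /* lower lane */

    long long lo = _mm_cvtsi128_si64(v);                        /* 0x2222... */
    long long hi = _mm_cvtsi128_si64(_mm_unpackhi_epi64(v, v)); /* 0x1111... */

    printf("lo=%llx hi=%llx\n",
           (unsigned long long)lo, (unsigned long long)hi);
    return 0;
}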
static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_set_epi16(short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0) +_mm_set_epi32(int __i3, int __i2, int __i1, int __i0) { - return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 }; + return (__m128i)(__v4si){ __i0, __i1, __i2, __i3}; } +/// \brief Initializes the 16-bit values in a 128-bit vector of [8 x i16] with +/// the specified 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w7 +/// A 16-bit integer value used to initialize bits [127:112] of the +/// destination vector. +/// \param __w6 +/// A 16-bit integer value used to initialize bits [111:96] of the +/// destination vector. +/// \param __w5 +/// A 16-bit integer value used to initialize bits [95:80] of the destination +/// vector. +/// \param __w4 +/// A 16-bit integer value used to initialize bits [79:64] of the destination +/// vector. +/// \param __w3 +/// A 16-bit integer value used to initialize bits [63:48] of the destination +/// vector. +/// \param __w2 +/// A 16-bit integer value used to initialize bits [47:32] of the destination +/// vector. +/// \param __w1 +/// A 16-bit integer value used to initialize bits [31:16] of the destination +/// vector. +/// \param __w0 +/// A 16-bit integer value used to initialize bits [15:0] of the destination +/// vector. +/// \returns An initialized 128-bit vector of [8 x i16] containing the values +/// provided in the operands. static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_set_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0) +_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0) { - return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 }; + return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 }; } +/// \brief Initializes the 8-bit values in a 128-bit vector of [16 x i8] with +/// the specified 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b15 +/// Initializes bits [127:120] of the destination vector. +/// \param __b14 +/// Initializes bits [119:112] of the destination vector. +/// \param __b13 +/// Initializes bits [111:104] of the destination vector. +/// \param __b12 +/// Initializes bits [103:96] of the destination vector. +/// \param __b11 +/// Initializes bits [95:88] of the destination vector. +/// \param __b10 +/// Initializes bits [87:80] of the destination vector. +/// \param __b9 +/// Initializes bits [79:72] of the destination vector. +/// \param __b8 +/// Initializes bits [71:64] of the destination vector. +/// \param __b7 +/// Initializes bits [63:56] of the destination vector. +/// \param __b6 +/// Initializes bits [55:48] of the destination vector. +/// \param __b5 +/// Initializes bits [47:40] of the destination vector. +/// \param __b4 +/// Initializes bits [39:32] of the destination vector. +/// \param __b3 +/// Initializes bits [31:24] of the destination vector. +/// \param __b2 +/// Initializes bits [23:16] of the destination vector. +/// \param __b1 +/// Initializes bits [15:8] of the destination vector. +/// \param __b0 +/// Initializes bits [7:0] of the destination vector. 
+/// \returns An initialized 128-bit vector of [16 x i8] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) +{ + return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 }; +} + +/// \brief Initializes both values in a 128-bit integer vector with the +/// specified 64-bit integer value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q +/// Integer value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit integer vector of [2 x i64] with both +/// elements containing the value provided in the operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q) { return (__m128i){ __q, __q }; } +/// \brief Initializes both values in a 128-bit vector of [2 x i64] with the +/// specified 64-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q +/// A 64-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [2 x i64] with all elements +/// containing the value provided in the operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q) { return (__m128i){ (long long)__q, (long long)__q }; } +/// \brief Initializes all values in a 128-bit vector of [4 x i32] with the +/// specified 32-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i +/// A 32-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [4 x i32] with all elements +/// containing the value provided in the operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i) { return (__m128i)(__v4si){ __i, __i, __i, __i }; } +/// \brief Initializes all values in a 128-bit vector of [8 x i16] with the +/// specified 16-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w +/// A 16-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [8 x i16] with all elements +/// containing the value provided in the operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w) { return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w }; } +/// \brief Initializes all values in a 128-bit vector of [16 x i8] with the +/// specified 8-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b +/// An 8-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [16 x i8] with all elements +/// containing the value provided in the operand. 
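/*
 * Usage sketch (illustrative only; not part of the vendored clang header or
 * of this patch).  Combines the _mm_set1_* broadcasts documented above with
 * the signed comparison intrinsics from earlier in this header; assumes an
 * SSE2 target and C99.
 */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i data      = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1); /* lanes 7..0 */
    __m128i threshold = _mm_set1_epi16(4);                     /* broadcast  */

    /* Lanes strictly greater than 4 become 0xFFFF, the rest 0x0000. */
    __m128i mask = _mm_cmpgt_epi16(data, threshold);

    short out[8];
    _mm_storeu_si128((__m128i *)out, mask);
    for (int i = 0; i < 8; ++i)
        printf("%d ", out[i] != 0);  /* prints 0 0 0 0 1 1 1 1 */
    printf("\n");
    return 0;
}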
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b) { return (__m128i)(__v16qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b }; } +/// \brief Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ / PUNPCKLQDQ +/// instruction. +/// +/// \param __q0 +/// A 64-bit integral value used to initialize the lower 64 bits of the +/// result. +/// \param __q1 +/// A 64-bit integral value used to initialize the upper 64 bits of the +/// result. +/// \returns An initialized 128-bit integer vector. static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_setr_epi64(__m64 q0, __m64 q1) +_mm_setr_epi64(__m64 __q0, __m64 __q1) { - return (__m128i){ (long long)q0, (long long)q1 }; + return (__m128i){ (long long)__q0, (long long)__q1 }; } +/// \brief Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \returns An initialized 128-bit integer vector. static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_setr_epi32(int i0, int i1, int i2, int i3) +_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3) { - return (__m128i)(__v4si){ i0, i1, i2, i3}; + return (__m128i)(__v4si){ __i0, __i1, __i2, __i3}; } +/// \brief Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w0 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \param __w1 +/// A 16-bit integral value used to initialize bits [31:16] of the result. +/// \param __w2 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w3 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w4 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w5 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w6 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w7 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \returns An initialized 128-bit integer vector. static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_setr_epi16(short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7) +_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7) { - return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 }; + return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 }; } +/// \brief Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. 
+/// +/// \param __b0 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \param __b1 +/// An 8-bit integral value used to initialize bits [15:8] of the result. +/// \param __b2 +/// An 8-bit integral value used to initialize bits [23:16] of the result. +/// \param __b3 +/// An 8-bit integral value used to initialize bits [31:24] of the result. +/// \param __b4 +/// An 8-bit integral value used to initialize bits [39:32] of the result. +/// \param __b5 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b6 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b7 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b8 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b9 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \returns An initialized 128-bit integer vector. static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_setr_epi8(char b0, char b1, char b2, char b3, char b4, char b5, char b6, char b7, char b8, char b9, char b10, char b11, char b12, char b13, char b14, char b15) +_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15) { - return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 }; + return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 }; } +/// \brief Creates a 128-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instruction. +/// +/// \returns An initialized 128-bit integer vector with all elements set to +/// zero. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void) { return (__m128i){ 0LL, 0LL }; } +/// \brief Stores a 128-bit integer vector to a memory location aligned on a +/// 128-bit boundary. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction. +/// +/// \param __p +/// A pointer to an aligned memory location that will receive the integer +/// values. +/// \param __b +/// A 128-bit integer vector containing the values to be moved. static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p, __m128i __b) { *__p = __b; } +/// \brief Stores a 128-bit integer vector to an unaligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the integer values. +/// \param __b +/// A 128-bit integer vector containing the values to be moved. 
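/*
 * Usage sketch (illustrative only; not part of the vendored clang header or
 * of this patch).  It shows that _mm_set_epi32 takes elements from the
 * highest lane down while _mm_setr_epi32 ("reverse") takes them from the
 * lowest lane up, so the two calls below build the same vector.  Assumes an
 * SSE2 target; _mm_cmpeq_epi32 and _mm_movemask_epi8 are other SSE2
 * intrinsics from this header.
 */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i a = _mm_set_epi32(3, 2, 1, 0);   /* highest element first */
    __m128i b = _mm_setr_epi32(0, 1, 2, 3);  /* lowest element first  */

    /* _mm_cmpeq_epi32 sets equal lanes to all ones; _mm_movemask_epi8 then
     * collapses the byte sign bits into a 16-bit mask, 0xFFFF when every
     * lane matched. */
    __m128i eq = _mm_cmpeq_epi32(a, b);
    printf("equal: %s\n", _mm_movemask_epi8(eq) == 0xFFFF ? "yes" : "no");

    int out[4];
    _mm_storeu_si128((__m128i *)out, a);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 0 1 2 3 */
    return 0;
}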
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i *__p, __m128i __b) { - __builtin_ia32_storedqu((char *)__p, (__v16qi)__b); + struct __storeu_si128 { + __m128i __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si128*)__p)->__v = __b; } +/// \brief Moves bytes selected by the mask from the first operand to the +/// specified unaligned memory location. When a mask bit is 1, the +/// corresponding byte is written, otherwise it is not written. To minimize +/// caching, the date is flagged as non-temporal (unlikely to be used again +/// soon). Exception and trap behavior for elements not selected for storage +/// to memory are implementation dependent. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVDQU / MASKMOVDQU +/// instruction. +/// +/// \param __d +/// A 128-bit integer vector containing the values to be moved. +/// \param __n +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each byte represents the mask bits. +/// \param __p +/// A pointer to an unaligned 128-bit memory location where the specified +/// values are moved. static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p) { __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p); } +/// \brief Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to +/// a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPS / MOVLPS instruction. +/// +/// \param __p +/// A pointer to a 64-bit memory location that will receive the lower 64 bits +/// of the integer vector parameter. +/// \param __a +/// A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the +/// value to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i *__p, __m128i __a) { @@ -1236,18 +3931,54 @@ _mm_storel_epi64(__m128i *__p, __m128i __a) ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0]; } +/// \brief Stores a 128-bit floating point vector of [2 x double] to a 128-bit +/// aligned memory location. To minimize caching, the data is flagged as +/// non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. +/// +/// \param __p +/// A pointer to the 128-bit aligned memory location used to store the value. +/// \param __a +/// A vector of [2 x double] containing the 64-bit values to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(double *__p, __m128d __a) { - __builtin_ia32_movntpd(__p, __a); + __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p); } +/// \brief Stores a 128-bit integer vector to a 128-bit aligned memory location. +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. +/// +/// \param __p +/// A pointer to the 128-bit aligned memory location used to store the value. +/// \param __a +/// A 128-bit integer vector containing the values to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(__m128i *__p, __m128i __a) { - __builtin_ia32_movntdq(__p, __a); + __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p); } +/// \brief Stores a 32-bit integer value in the specified memory location. To +/// minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTI instruction. 
+/// +/// \param __p +/// A pointer to the 32-bit memory location used to store the value. +/// \param __a +/// A 32-bit integer containing the value to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si32(int *__p, int __a) { @@ -1255,6 +3986,18 @@ _mm_stream_si32(int *__p, int __a) } #ifdef __x86_64__ +/// \brief Stores a 64-bit integer value in the specified memory location. To +/// minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTIQ instruction. +/// +/// \param __p +/// A pointer to the 64-bit memory location used to store the value. +/// \param __a +/// A 64-bit integer containing the value to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si64(long long *__p, long long __a) { @@ -1262,42 +4005,154 @@ _mm_stream_si64(long long *__p, long long __a) } #endif -static __inline__ void __DEFAULT_FN_ATTRS -_mm_clflush(void const *__p) -{ - __builtin_ia32_clflush(__p); -} +#if defined(__cplusplus) +extern "C" { +#endif -static __inline__ void __DEFAULT_FN_ATTRS -_mm_lfence(void) -{ - __builtin_ia32_lfence(); -} +/// \brief The cache line containing \a __p is flushed and invalidated from all +/// caches in the coherency domain. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CLFLUSH instruction. +/// +/// \param __p +/// A pointer to the memory location used to identify the cache line to be +/// flushed. +void _mm_clflush(void const *); -static __inline__ void __DEFAULT_FN_ATTRS -_mm_mfence(void) -{ - __builtin_ia32_mfence(); -} +/// \brief Forces strong memory ordering (serialization) between load +/// instructions preceding this instruction and load instructions following +/// this instruction, ensuring the system completes all previous loads before +/// executing subsequent loads. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LFENCE instruction. +/// +void _mm_lfence(void); +/// \brief Forces strong memory ordering (serialization) between load and store +/// instructions preceding this instruction and load and store instructions +/// following this instruction, ensuring that the system completes all +/// previous memory accesses before executing subsequent memory accesses. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MFENCE instruction. +/// +void _mm_mfence(void); + +#if defined(__cplusplus) +} // extern "C" +#endif + +/// \brief Converts 16-bit signed integers from both 128-bit integer vector +/// operands into 8-bit signed integers, and packs the results into the +/// destination. Positive values greater than 0x7F are saturated to 0x7F. +/// Negative values less than 0x80 are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKSSWB / PACKSSWB instruction. +/// +/// \param __a +/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as +/// a signed integer and is converted to a 8-bit signed integer with +/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less +/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are +/// written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as +/// a signed integer and is converted to a 8-bit signed integer with +/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less +/// than 0x80 are saturated to 0x80. 
The converted [8 x i8] values are +/// written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [16 x i8] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b); } +/// \brief Converts 32-bit signed integers from both 128-bit integer vector +/// operands into 16-bit signed integers, and packs the results into the +/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF. +/// Negative values less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKSSDW / PACKSSDW instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as +/// a signed integer and is converted to a 16-bit signed integer with +/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values +/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values +/// are written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as +/// a signed integer and is converted to a 16-bit signed integer with +/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values +/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values +/// are written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [8 x i16] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b); } +/// \brief Converts 16-bit signed integers from both 128-bit integer vector +/// operands into 8-bit unsigned integers, and packs the results into the +/// destination. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0x00 are saturated to 0x00. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKUSWB / PACKUSWB instruction. +/// +/// \param __a +/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as +/// a signed integer and is converted to an 8-bit unsigned integer with +/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are +/// written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as +/// a signed integer and is converted to an 8-bit unsigned integer with +/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are +/// written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [16 x i8] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b); } +/// \brief Extracts 16 bits from a 128-bit integer vector of [8 x i16], using +/// the immediate-value parameter as a selector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __imm +/// An immediate value. Bits [3:0] selects values from \a __a to be assigned +/// to bits[15:0] of the result. \n +/// 000: assign values from bits [15:0] of \a __a. 
\n +/// 001: assign values from bits [31:16] of \a __a. \n +/// 010: assign values from bits [47:32] of \a __a. \n +/// 011: assign values from bits [63:48] of \a __a. \n +/// 100: assign values from bits [79:64] of \a __a. \n +/// 101: assign values from bits [95:80] of \a __a. \n +/// 110: assign values from bits [111:96] of \a __a. \n +/// 111: assign values from bits [127:112] of \a __a. +/// \returns An integer, whose lower 16 bits are selected from the 128-bit +/// integer vector parameter and the remaining bits are assigned zeros. static __inline__ int __DEFAULT_FN_ATTRS _mm_extract_epi16(__m128i __a, int __imm) { @@ -1305,6 +4160,26 @@ _mm_extract_epi16(__m128i __a, int __imm) return (unsigned short)__b[__imm & 7]; } +/// \brief Constructs a 128-bit integer vector by first making a copy of the +/// 128-bit integer vector parameter, and then inserting the lower 16 bits +/// of an integer parameter into an offset specified by the immediate-value +/// parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPINSRW / PINSRW instruction. +/// +/// \param __a +/// A 128-bit integer vector of [8 x i16]. This vector is copied to the +/// result and then one of the eight elements in the result is replaced by +/// the lower 16 bits of \a __b. +/// \param __b +/// An integer. The lower 16 bits of this parameter are written to the +/// result beginning at an offset specified by \a __imm. +/// \param __imm +/// An immediate value specifying the bit offset in the result at which the +/// lower 16 bits of \a __b are written. +/// \returns A 128-bit integer vector containing the constructed values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_insert_epi16(__m128i __a, int __b, int __imm) { @@ -1313,168 +4188,604 @@ _mm_insert_epi16(__m128i __a, int __b, int __imm) return (__m128i)__c; } +/// \brief Copies the values of the most significant bits from each 8-bit +/// element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask +/// value, zero-extends the value, and writes it to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVMSKB / PMOVMSKB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values with bits to be extracted. +/// \returns The most significant bits from each 8-bit element in \a __a, +/// written to bits [15:0]. The other bits are assigned zeros. static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) { return __builtin_ia32_pmovmskb128((__v16qi)__a); } +/// \brief Constructs a 128-bit integer vector by shuffling four 32-bit +/// elements of a 128-bit integer vector parameter, using the immediate-value +/// parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shuffle_epi32(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFD / PSHUFD instruction. +/// +/// \param a +/// A 128-bit integer vector containing the values to be copied. +/// \param imm +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from a. The destinations within the 128-bit destination are assigned +/// values as follows: \n +/// Bits [1:0] are used to assign values to bits [31:0] of the result. \n +/// Bits [3:2] are used to assign values to bits [63:32] of the result. \n +/// Bits [5:4] are used to assign values to bits [95:64] of the result. \n +/// Bits [7:6] are used to assign values to bits [127:96] of the result. 
\n +/// Bit value assignments: \n +/// 00: assign values from bits [31:0] of \a a. \n +/// 01: assign values from bits [63:32] of \a a. \n +/// 10: assign values from bits [95:64] of \a a. \n +/// 11: assign values from bits [127:96] of \a a. +/// \returns A 128-bit integer vector containing the shuffled values. #define _mm_shuffle_epi32(a, imm) __extension__ ({ \ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \ - (__v4si)_mm_set1_epi32(0), \ - (imm) & 0x3, ((imm) & 0xc) >> 2, \ - ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); }) + (__v4si)_mm_undefined_si128(), \ + ((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \ + ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); }) +/// \brief Constructs a 128-bit integer vector by shuffling four lower 16-bit +/// elements of a 128-bit integer vector of [8 x i16], using the immediate +/// value parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFLW / PSHUFLW instruction. +/// +/// \param a +/// A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits +/// [127:64] of the result. +/// \param imm +/// An 8-bit immediate value specifying which elements to copy from \a a. \n +/// Bits[1:0] are used to assign values to bits [15:0] of the result. \n +/// Bits[3:2] are used to assign values to bits [31:16] of the result. \n +/// Bits[5:4] are used to assign values to bits [47:32] of the result. \n +/// Bits[7:6] are used to assign values to bits [63:48] of the result. \n +/// Bit value assignments: \n +/// 00: assign values from bits [15:0] of \a a. \n +/// 01: assign values from bits [31:16] of \a a. \n +/// 10: assign values from bits [47:32] of \a a. \n +/// 11: assign values from bits [63:48] of \a a. \n +/// \returns A 128-bit integer vector containing the shuffled values. #define _mm_shufflelo_epi16(a, imm) __extension__ ({ \ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \ - (__v8hi)_mm_set1_epi16(0), \ - (imm) & 0x3, ((imm) & 0xc) >> 2, \ - ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \ + (__v8hi)_mm_undefined_si128(), \ + ((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \ + ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3, \ 4, 5, 6, 7); }) +/// \brief Constructs a 128-bit integer vector by shuffling four upper 16-bit +/// elements of a 128-bit integer vector of [8 x i16], using the immediate +/// value parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFHW / PSHUFHW instruction. +/// +/// \param a +/// A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits +/// [63:0] of the result. +/// \param imm +/// An 8-bit immediate value specifying which elements to copy from \a a. \n +/// Bits[1:0] are used to assign values to bits [79:64] of the result. \n +/// Bits[3:2] are used to assign values to bits [95:80] of the result. \n +/// Bits[5:4] are used to assign values to bits [111:96] of the result. \n +/// Bits[7:6] are used to assign values to bits [127:112] of the result. \n +/// Bit value assignments: \n +/// 00: assign values from bits [79:64] of \a a. \n +/// 01: assign values from bits [95:80] of \a a. \n +/// 10: assign values from bits [111:96] of \a a. \n +/// 11: assign values from bits [127:112] of \a a. \n +/// \returns A 128-bit integer vector containing the shuffled values. 
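/*
 * Usage sketch (illustrative only; not part of the vendored clang header or
 * of this patch).  It demonstrates the immediate selector of
 * _mm_shuffle_epi32 documented above, built with the _MM_SHUFFLE(z, y, x, w)
 * helper macro from xmmintrin.h.  Assumes an SSE2 target.
 */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i v = _mm_setr_epi32(10, 11, 12, 13);  /* lanes 0..3 */

    /* Reverse the four 32-bit lanes: result lane 0 <- source lane 3, etc. */
    __m128i rev = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));

    /* Broadcast lane 2 into every position. */
    __m128i bcast = _mm_shuffle_epi32(v, _MM_SHUFFLE(2, 2, 2, 2));

    int r[4], b[4];
    _mm_storeu_si128((__m128i *)r, rev);
    _mm_storeu_si128((__m128i *)b, bcast);
    printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]); /* 13 12 11 10 */
    printf("%d %d %d %d\n", b[0], b[1], b[2], b[3]); /* 12 12 12 12 */
    return 0;
}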
#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \ (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \ - (__v8hi)_mm_set1_epi16(0), \ + (__v8hi)_mm_undefined_si128(), \ 0, 1, 2, 3, \ - 4 + (((imm) & 0x03) >> 0), \ - 4 + (((imm) & 0x0c) >> 2), \ - 4 + (((imm) & 0x30) >> 4), \ - 4 + (((imm) & 0xc0) >> 6)); }) + 4 + (((imm) >> 0) & 0x3), \ + 4 + (((imm) >> 2) & 0x3), \ + 4 + (((imm) >> 4) & 0x3), \ + 4 + (((imm) >> 6) & 0x3)); }) +/// \brief Unpacks the high-order (index 8-15) values from two 128-bit vectors +/// of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHBW / PUNPCKHBW +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// Bits [71:64] are written to bits [7:0] of the result. \n +/// Bits [79:72] are written to bits [23:16] of the result. \n +/// Bits [87:80] are written to bits [39:32] of the result. \n +/// Bits [95:88] are written to bits [55:48] of the result. \n +/// Bits [103:96] are written to bits [71:64] of the result. \n +/// Bits [111:104] are written to bits [87:80] of the result. \n +/// Bits [119:112] are written to bits [103:96] of the result. \n +/// Bits [127:120] are written to bits [119:112] of the result. +/// \param __b +/// A 128-bit vector of [16 x i8]. \n +/// Bits [71:64] are written to bits [15:8] of the result. \n +/// Bits [79:72] are written to bits [31:24] of the result. \n +/// Bits [87:80] are written to bits [47:40] of the result. \n +/// Bits [95:88] are written to bits [63:56] of the result. \n +/// Bits [103:96] are written to bits [79:72] of the result. \n +/// Bits [111:104] are written to bits [95:88] of the result. \n +/// Bits [119:112] are written to bits [111:104] of the result. \n +/// Bits [127:120] are written to bits [127:120] of the result. +/// \returns A 128-bit vector of [16 x i8] containing the interleaved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a, __m128i __b) { return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15); } +/// \brief Unpacks the high-order (index 4-7) values from two 128-bit vectors of +/// [8 x i16] and interleaves them into a 128-bit vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHWD / PUNPCKHWD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// Bits [79:64] are written to bits [15:0] of the result. \n +/// Bits [95:80] are written to bits [47:32] of the result. \n +/// Bits [111:96] are written to bits [79:64] of the result. \n +/// Bits [127:112] are written to bits [111:96] of the result. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// Bits [79:64] are written to bits [31:16] of the result. \n +/// Bits [95:80] are written to bits [63:48] of the result. \n +/// Bits [111:96] are written to bits [95:80] of the result. \n +/// Bits [127:112] are written to bits [127:112] of the result. +/// \returns A 128-bit vector of [8 x i16] containing the interleaved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); } +/// \brief Unpacks the high-order (index 2,3) values from two 128-bit vectors of +/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHDQ / PUNPCKHDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. \n +/// Bits [95:64] are written to bits [31:0] of the destination. \n +/// Bits [127:96] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x i32]. \n +/// Bits [95:64] are written to bits [64:32] of the destination. \n +/// Bits [127:96] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x i32] containing the interleaved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a, __m128i __b) { return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3); } +/// \brief Unpacks the high-order (odd-indexed) values from two 128-bit vectors +/// of [2 x i64] and interleaves them into a 128-bit vector of [2 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHQDQ / PUNPCKHQDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. \n +/// Bits [127:64] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x i64]. \n +/// Bits [127:64] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x i64] containing the interleaved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a, __m128i __b) { - return (__m128i)__builtin_shufflevector(__a, __b, 1, 2+1); + return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1); } +/// \brief Unpacks the low-order (index 0-7) values from two 128-bit vectors of +/// [16 x i8] and interleaves them into a 128-bit vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLBW / PUNPCKLBW +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. \n +/// Bits [7:0] are written to bits [7:0] of the result. \n +/// Bits [15:8] are written to bits [23:16] of the result. \n +/// Bits [23:16] are written to bits [39:32] of the result. \n +/// Bits [31:24] are written to bits [55:48] of the result. \n +/// Bits [39:32] are written to bits [71:64] of the result. \n +/// Bits [47:40] are written to bits [87:80] of the result. \n +/// Bits [55:48] are written to bits [103:96] of the result. \n +/// Bits [63:56] are written to bits [119:112] of the result. +/// \param __b +/// A 128-bit vector of [16 x i8]. +/// Bits [7:0] are written to bits [15:8] of the result. \n +/// Bits [15:8] are written to bits [31:24] of the result. \n +/// Bits [23:16] are written to bits [47:40] of the result. \n +/// Bits [31:24] are written to bits [63:56] of the result. \n +/// Bits [39:32] are written to bits [79:72] of the result. \n +/// Bits [47:40] are written to bits [95:88] of the result. \n +/// Bits [55:48] are written to bits [111:104] of the result. \n +/// Bits [63:56] are written to bits [127:120] of the result. +/// \returns A 128-bit vector of [16 x i8] containing the interleaved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a, __m128i __b) { return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7); } +/// \brief Unpacks the low-order (index 0-3) values from each of the two 128-bit +/// vectors of [8 x i16] and interleaves them into a 128-bit vector of +/// [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLWD / PUNPCKLWD +/// instruction. 
+/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// Bits [15:0] are written to bits [15:0] of the result. \n +/// Bits [31:16] are written to bits [47:32] of the result. \n +/// Bits [47:32] are written to bits [79:64] of the result. \n +/// Bits [63:48] are written to bits [111:96] of the result. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// Bits [15:0] are written to bits [31:16] of the result. \n +/// Bits [31:16] are written to bits [63:48] of the result. \n +/// Bits [47:32] are written to bits [95:80] of the result. \n +/// Bits [63:48] are written to bits [127:112] of the result. +/// \returns A 128-bit vector of [8 x i16] containing the interleaved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); } +/// \brief Unpacks the low-order (index 0,1) values from two 128-bit vectors of +/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLDQ / PUNPCKLDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. \n +/// Bits [31:0] are written to bits [31:0] of the destination. \n +/// Bits [63:32] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x i32]. \n +/// Bits [31:0] are written to bits [64:32] of the destination. \n +/// Bits [63:32] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x i32] containing the interleaved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a, __m128i __b) { return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1); } +/// \brief Unpacks the low-order 64-bit elements from two 128-bit vectors of +/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ / PUNPCKLQDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. \n +/// Bits [63:0] are written to bits [63:0] of the destination. \n +/// \param __b +/// A 128-bit vector of [2 x i64]. \n +/// Bits [63:0] are written to bits [127:64] of the destination. \n +/// \returns A 128-bit vector of [2 x i64] containing the interleaved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a, __m128i __b) { - return (__m128i)__builtin_shufflevector(__a, __b, 0, 2+0); + return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0); } +/// \brief Returns the lower 64 bits of a 128-bit integer vector as a 64-bit +/// integer. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector operand. The lower 64 bits are moved to the +/// destination. +/// \returns A 64-bit integer containing the lower 64 bits of the parameter. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a) { return (__m64)__a[0]; } +/// \brief Moves the 64-bit operand to a 128-bit integer vector, zeroing the +/// upper bits. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ / MOVD instruction. +/// +/// \param __a +/// A 64-bit value. +/// \returns A 128-bit integer vector. The lower 64 bits contain the value from +/// the operand. The upper 64 bits are assigned zeros. 
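/*
 * Usage sketch (illustrative only; not part of the vendored clang header or
 * of this patch).  It shows how the unpack intrinsics documented above
 * interleave two vectors; a common idiom is widening unsigned 8-bit data to
 * 16 bits by interleaving with zeros.  Assumes an SSE2 target.
 */
#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i bytes = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                  8, 9, 10, 11, 12, 13, 14, 15);
    __m128i zero  = _mm_setzero_si128();

    /* Interleaving unsigned bytes with zero bytes zero-extends the low and
     * high eight elements to sixteen bits each. */
    __m128i lo16 = _mm_unpacklo_epi8(bytes, zero);
    __m128i hi16 = _mm_unpackhi_epi8(bytes, zero);

    short lo[8], hi[8];
    _mm_storeu_si128((__m128i *)lo, lo16);
    _mm_storeu_si128((__m128i *)hi, hi16);
    printf("%d %d ... %d\n", lo[0], lo[1], lo[7]);  /* 0 1 ... 7  */
    printf("%d %d ... %d\n", hi[0], hi[1], hi[7]);  /* 8 9 ... 15 */
    return 0;
}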
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a) { return (__m128i){ (long long)__a, 0 }; } +/// \brief Moves the lower 64 bits of a 128-bit integer vector to a 128-bit +/// integer vector, zeroing the upper bits. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A 128-bit integer vector operand. The lower 64 bits are moved to the +/// destination. +/// \returns A 128-bit integer vector. The lower 64 bits contain the value from +/// the operand. The upper 64 bits are assigned zeros. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a) { - return __builtin_shufflevector(__a, (__m128i){ 0 }, 0, 2); + return __builtin_shufflevector((__v2di)__a, (__m128i){ 0 }, 0, 2); } +/// \brief Unpacks the high-order (odd-indexed) values from two 128-bit vectors +/// of [2 x double] and interleaves them into a 128-bit vector of [2 x +/// double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD / UNPCKHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x double] containing the interleaved values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a, __m128d __b) { - return __builtin_shufflevector(__a, __b, 1, 2+1); + return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1); } +/// \brief Unpacks the low-order (even-indexed) values from two 128-bit vectors +/// of [2 x double] and interleaves them into a 128-bit vector of [2 x +/// double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x double] containing the interleaved values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a, __m128d __b) { - return __builtin_shufflevector(__a, __b, 0, 2+0); + return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0); } +/// \brief Extracts the sign bits of the double-precision values in the 128-bit +/// vector of [2 x double], zero-extends the value, and writes it to the +/// low-order bits of the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPD / MOVMSKPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the values with sign bits to +/// be extracted. +/// \returns The sign bits from each of the double-precision elements in \a __a, +/// written to bits [1:0]. The remaining bits are assigned values of zero. static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) { - return __builtin_ia32_movmskpd(__a); + return __builtin_ia32_movmskpd((__v2df)__a); } -#define _mm_shuffle_pd(a, b, i) __extension__ ({ \ - __builtin_shufflevector((__m128d)(a), (__m128d)(b), \ - (i) & 1, (((i) & 2) >> 1) + 2); }) +/// \brief Constructs a 128-bit floating-point vector of [2 x double] from two +/// 128-bit vector parameters of [2 x double], using the immediate-value +/// parameter as a specifier. 
+/// +/// \headerfile +/// +/// \code +/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPD / SHUFPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param i +/// An 8-bit immediate value. The least significant two bits specify which +/// elements to copy from a and b: \n +/// Bit[0] = 0: lower element of a copied to lower element of result. \n +/// Bit[0] = 1: upper element of a copied to lower element of result. \n +/// Bit[1] = 0: lower element of \a b copied to upper element of result. \n +/// Bit[1] = 1: upper element of \a b copied to upper element of result. \n +/// \returns A 128-bit vector of [2 x double] containing the shuffled values. +#define _mm_shuffle_pd(a, b, i) __extension__ ({ \ + (__m128d)__builtin_shufflevector((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \ + 0 + (((i) >> 0) & 0x1), \ + 2 + (((i) >> 1) & 0x1)); }) + +/// \brief Casts a 128-bit floating-point vector of [2 x double] into a 128-bit +/// floating-point vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [2 x double]. +/// \returns A 128-bit floating-point vector of [4 x float] containing the same +/// bitwise pattern as the parameter. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a) { return (__m128)__a; } +/// \brief Casts a 128-bit floating-point vector of [2 x double] into a 128-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [2 x double]. +/// \returns A 128-bit integer vector containing the same bitwise pattern as the +/// parameter. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a) { return (__m128i)__a; } +/// \brief Casts a 128-bit floating-point vector of [4 x float] into a 128-bit +/// floating-point vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 128-bit floating-point vector of [2 x double] containing the same +/// bitwise pattern as the parameter. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a) { return (__m128d)__a; } +/// \brief Casts a 128-bit floating-point vector of [4 x float] into a 128-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 128-bit integer vector containing the same bitwise pattern as the +/// parameter. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a) { return (__m128i)__a; } +/// \brief Casts a 128-bit integer vector into a 128-bit floating-point vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 128-bit floating-point vector of [4 x float] containing the same +/// bitwise pattern as the parameter. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a) { return (__m128)__a; } +/// \brief Casts a 128-bit integer vector into a 128-bit floating-point vector +/// of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. 
+/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 128-bit floating-point vector of [2 x double] containing the same +/// bitwise pattern as the parameter. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) { return (__m128d)__a; } -static __inline__ void __DEFAULT_FN_ATTRS -_mm_pause(void) -{ - __asm__ volatile ("pause"); -} +#if defined(__cplusplus) +extern "C" { +#endif +/// \brief Indicates that a spin loop is being executed for the purposes of +/// optimizing power consumption during the loop. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAUSE instruction. +/// +void _mm_pause(void); + +#if defined(__cplusplus) +} // extern "C" +#endif #undef __DEFAULT_FN_ATTRS #define _MM_SHUFFLE2(x, y) (((x) << 1) | (y)) -#endif /* __SSE2__ */ - #endif /* __EMMINTRIN_H */ diff --git a/c_headers/f16cintrin.h b/c_headers/f16cintrin.h index 3730ae0d3..180712ffc 100644 --- a/c_headers/f16cintrin.h +++ b/c_headers/f16cintrin.h @@ -21,43 +21,104 @@ *===-----------------------------------------------------------------------=== */ -#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H -#error "Never use directly; include instead." +#if !defined __X86INTRIN_H && !defined __EMMINTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." #endif -#ifndef __F16C__ -# error "F16C instruction is not enabled" -#endif /* __F16C__ */ - #ifndef __F16CINTRIN_H #define __F16CINTRIN_H -typedef float __v8sf __attribute__ ((__vector_size__ (32))); -typedef float __m256 __attribute__ ((__vector_size__ (32))); - /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("f16c"))) -#define _mm_cvtps_ph(a, imm) __extension__ ({ \ - __m128 __a = (a); \ - (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)__a, (imm)); }) +/// \brief Converts a 16-bit half-precision float value into a 32-bit float +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPH2PS instruction. +/// +/// \param __a +/// A 16-bit half-precision float value. +/// \returns The converted 32-bit float value. +static __inline float __DEFAULT_FN_ATTRS +_cvtsh_ss(unsigned short __a) +{ + __v8hi v = {(short)__a, 0, 0, 0, 0, 0, 0, 0}; + __v4sf r = __builtin_ia32_vcvtph2ps(v); + return r[0]; +} -#define _mm256_cvtps_ph(a, imm) __extension__ ({ \ - __m256 __a = (a); \ - (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)__a, (imm)); }) +/// \brief Converts a 32-bit single-precision float value to a 16-bit +/// half-precision float value. +/// +/// \headerfile +/// +/// \code +/// unsigned short _cvtss_sh(float a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VCVTPS2PH instruction. +/// +/// \param a +/// A 32-bit single-precision float value to be converted to a 16-bit +/// half-precision float value. +/// \param imm +/// An immediate value controlling rounding using bits [2:0]: \n +/// 000: Nearest \n +/// 001: Down \n +/// 010: Up \n +/// 011: Truncate \n +/// 1XX: Use MXCSR.RC for rounding +/// \returns The converted 16-bit half-precision float value. +#define _cvtss_sh(a, imm) \ + ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \ + (imm)))[0])) +/// \brief Converts a 128-bit vector containing 32-bit float values into a +/// 128-bit vector containing 16-bit half-precision float values. 
+/// +/// \headerfile +/// +/// \code +/// __m128i _mm_cvtps_ph(__m128 a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VCVTPS2PH instruction. +/// +/// \param a +/// A 128-bit vector containing 32-bit float values. +/// \param imm +/// An immediate value controlling rounding using bits [2:0]: \n +/// 000: Nearest \n +/// 001: Down \n +/// 010: Up \n +/// 011: Truncate \n +/// 1XX: Use MXCSR.RC for rounding +/// \returns A 128-bit vector containing converted 16-bit half-precision float +/// values. The lower 64 bits are used to store the converted 16-bit +/// half-precision floating-point values. +#define _mm_cvtps_ph(a, imm) \ + ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm))) + +/// \brief Converts a 128-bit vector containing 16-bit half-precision float +/// values into a 128-bit vector containing 32-bit float values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPH2PS instruction. +/// +/// \param __a +/// A 128-bit vector containing 16-bit half-precision float values. The lower +/// 64 bits are used in the conversion. +/// \returns A 128-bit vector of [4 x float] containing converted float values. static __inline __m128 __DEFAULT_FN_ATTRS _mm_cvtph_ps(__m128i __a) { return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a); } -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_cvtph_ps(__m128i __a) -{ - return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a); -} - #undef __DEFAULT_FN_ATTRS #endif /* __F16CINTRIN_H */ diff --git a/c_headers/float.h b/c_headers/float.h index 238cf76b0..0f453d87c 100644 --- a/c_headers/float.h +++ b/c_headers/float.h @@ -27,9 +27,12 @@ /* If we're on MinGW, fall back to the system's float.h, which might have * additional definitions provided for Windows. * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx + * + * Also fall back on Darwin to allow additional definitions and + * implementation-defined values. */ -#if (defined(__MINGW32__) || defined(_MSC_VER)) && __STDC_HOSTED__ && \ - __has_include_next() +#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \ + __STDC_HOSTED__ && __has_include_next() # include_next /* Undefine anything that we'll be redefining below. 
*/ @@ -39,7 +42,9 @@ # undef FLT_MANT_DIG # undef DBL_MANT_DIG # undef LDBL_MANT_DIG -# undef DECIMAL_DIG +# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) +# undef DECIMAL_DIG +# endif # undef FLT_DIG # undef DBL_DIG # undef LDBL_DIG @@ -68,6 +73,9 @@ # undef FLT_TRUE_MIN # undef DBL_TRUE_MIN # undef LDBL_TRUE_MIN +# undef FLT_DECIMAL_DIG +# undef DBL_DECIMAL_DIG +# undef LDBL_DECIMAL_DIG # endif #endif @@ -81,7 +89,9 @@ #define DBL_MANT_DIG __DBL_MANT_DIG__ #define LDBL_MANT_DIG __LDBL_MANT_DIG__ -#define DECIMAL_DIG __DECIMAL_DIG__ +#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) +# define DECIMAL_DIG __DECIMAL_DIG__ +#endif #define FLT_DIG __FLT_DIG__ #define DBL_DIG __DBL_DIG__ @@ -119,6 +129,9 @@ # define FLT_TRUE_MIN __FLT_DENORM_MIN__ # define DBL_TRUE_MIN __DBL_DENORM_MIN__ # define LDBL_TRUE_MIN __LDBL_DENORM_MIN__ +# define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__ +# define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__ +# define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__ #endif #endif /* __FLOAT_H */ diff --git a/c_headers/fma4intrin.h b/c_headers/fma4intrin.h index d6405cf02..11aa8ceac 100644 --- a/c_headers/fma4intrin.h +++ b/c_headers/fma4intrin.h @@ -28,209 +28,203 @@ #ifndef __FMA4INTRIN_H #define __FMA4INTRIN_H -#ifndef __FMA4__ -# error "FMA4 instruction set is not enabled" -#else - #include /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma4"))) static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C); + return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_macc_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_macc_ss(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C); + return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_msub_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C); + return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_msub_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_msub_ss(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C); + return (__m128)__builtin_ia32_vfmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_msub_sd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_nmacc_ps(__m128 __A, __m128 __B, 
__m128 __C) { - return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C); + return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfnmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C); + return (__m128)__builtin_ia32_vfnmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfnmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C); + return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfnmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C); + return (__m128)__builtin_ia32_vfnmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfnmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C); + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C); + return (__m128)__builtin_ia32_vfmsubaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmsubaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfmsubps256((__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfnmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfnmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfnmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfmsubaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfmsubaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } #undef __DEFAULT_FN_ATTRS -#endif /* __FMA4__ */ - #endif /* __FMA4INTRIN_H */ diff --git a/c_headers/fmaintrin.h b/c_headers/fmaintrin.h index ad693fed0..0e2ef0b17 100644 --- a/c_headers/fmaintrin.h +++ b/c_headers/fmaintrin.h @@ -28,207 +28,201 @@ #ifndef __FMAINTRIN_H #define __FMAINTRIN_H -#ifndef __FMA__ -# error "FMA instruction set is not enabled" -#else - /* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma"))) static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C); + return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C); + return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C); + return (__m128)__builtin_ia32_vfmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C); + return (__m128)__builtin_ia32_vfmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C); + return (__m128)__builtin_ia32_vfnmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfnmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C); + return (__m128)__builtin_ia32_vfnmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfnmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C); + return (__m128)__builtin_ia32_vfnmsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfnmsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 
__DEFAULT_FN_ATTRS _mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C); + return (__m128)__builtin_ia32_vfnmsubss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfnmsubsd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C); + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) { - return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C); + return (__m128)__builtin_ia32_vfmsubaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C) { - return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C); + return (__m128d)__builtin_ia32_vfmsubaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfnmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfnmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfnmsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfnmsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) { - return 
(__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C) { - return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C); + return (__m256)__builtin_ia32_vfmsubaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); } static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C) { - return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C); + return (__m256d)__builtin_ia32_vfmsubaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); } #undef __DEFAULT_FN_ATTRS -#endif /* __FMA__ */ - #endif /* __FMAINTRIN_H */ diff --git a/c_headers/fxsrintrin.h b/c_headers/fxsrintrin.h index 2b3549c05..786081ca8 100644 --- a/c_headers/fxsrintrin.h +++ b/c_headers/fxsrintrin.h @@ -28,27 +28,77 @@ #ifndef __FXSRINTRIN_H #define __FXSRINTRIN_H -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr"))) +/// \brief Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte +/// memory region pointed to by the input parameter \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the FXSAVE instruction. +/// +/// \param __p +/// A pointer to a 512-byte memory region. The beginning of this memory +/// region should be aligned on a 16-byte boundary. static __inline__ void __DEFAULT_FN_ATTRS -_fxsave(void *__p) { +_fxsave(void *__p) +{ return __builtin_ia32_fxsave(__p); } +/// \brief Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte +/// memory region pointed to by the input parameter \a __p. The contents of +/// this memory region should have been written to by a previous \c _fxsave +/// or \c _fxsave64 intrinsic. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the FXRSTOR instruction. +/// +/// \param __p +/// A pointer to a 512-byte memory region. The beginning of this memory +/// region should be aligned on a 16-byte boundary. static __inline__ void __DEFAULT_FN_ATTRS -_fxsave64(void *__p) { - return __builtin_ia32_fxsave64(__p); -} - -static __inline__ void __DEFAULT_FN_ATTRS -_fxrstor(void *__p) { +_fxrstor(void *__p) +{ return __builtin_ia32_fxrstor(__p); } +#ifdef __x86_64__ +/// \brief Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte +/// memory region pointed to by the input parameter \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the FXSAVE64 instruction. +/// +/// \param __p +/// A pointer to a 512-byte memory region. The beginning of this memory +/// region should be aligned on a 16-byte boundary. static __inline__ void __DEFAULT_FN_ATTRS -_fxrstor64(void *__p) { +_fxsave64(void *__p) +{ + return __builtin_ia32_fxsave64(__p); +} + +/// \brief Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte +/// memory region pointed to by the input parameter \a __p. The contents of +/// this memory region should have been written to by a previous \c _fxsave +/// or \c _fxsave64 intrinsic. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the FXRSTOR64 instruction. 
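/* A minimal usage sketch of the FXSAVE/FXRSTOR intrinsics documented above.
 * The buffer follows the requirement stated in these comments (512 bytes,
 * 16-byte aligned); the helper names are illustrative only:
 *
 *   #include <immintrin.h>
 *
 *   static _Alignas(16) unsigned char fx_state[512];
 *
 *   void checkpoint_x87_sse(void) { _fxsave(fx_state); }
 *   void restore_x87_sse(void)    { _fxrstor(fx_state); }
 */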
+/// +/// \param __p +/// A pointer to a 512-byte memory region. The beginning of this memory +/// region should be aligned on a 16-byte boundary. +static __inline__ void __DEFAULT_FN_ATTRS +_fxrstor64(void *__p) +{ return __builtin_ia32_fxrstor64(__p); } +#endif #undef __DEFAULT_FN_ATTRS diff --git a/c_headers/htmintrin.h b/c_headers/htmintrin.h index 0088c7cca..69c8d7bb5 100644 --- a/c_headers/htmintrin.h +++ b/c_headers/htmintrin.h @@ -164,24 +164,24 @@ struct __htm_tdb { /* Helper intrinsics to retry tbegin in case of transient failure. */ static __inline int __attribute__((__always_inline__, __nodebug__)) -__builtin_tbegin_retry_null (int retry) +__builtin_tbegin_retry_null (int __retry) { int cc, i = 0; while ((cc = __builtin_tbegin(0)) == _HTM_TBEGIN_TRANSIENT - && i++ < retry) + && i++ < __retry) __builtin_tx_assist(i); return cc; } static __inline int __attribute__((__always_inline__, __nodebug__)) -__builtin_tbegin_retry_tdb (void *tdb, int retry) +__builtin_tbegin_retry_tdb (void *__tdb, int __retry) { int cc, i = 0; - while ((cc = __builtin_tbegin(tdb)) == _HTM_TBEGIN_TRANSIENT - && i++ < retry) + while ((cc = __builtin_tbegin(__tdb)) == _HTM_TBEGIN_TRANSIENT + && i++ < __retry) __builtin_tx_assist(i); return cc; @@ -193,24 +193,24 @@ __builtin_tbegin_retry_tdb (void *tdb, int retry) __builtin_tbegin_retry_tdb(tdb, retry)) static __inline int __attribute__((__always_inline__, __nodebug__)) -__builtin_tbegin_retry_nofloat_null (int retry) +__builtin_tbegin_retry_nofloat_null (int __retry) { int cc, i = 0; while ((cc = __builtin_tbegin_nofloat(0)) == _HTM_TBEGIN_TRANSIENT - && i++ < retry) + && i++ < __retry) __builtin_tx_assist(i); return cc; } static __inline int __attribute__((__always_inline__, __nodebug__)) -__builtin_tbegin_retry_nofloat_tdb (void *tdb, int retry) +__builtin_tbegin_retry_nofloat_tdb (void *__tdb, int __retry) { int cc, i = 0; - while ((cc = __builtin_tbegin_nofloat(tdb)) == _HTM_TBEGIN_TRANSIENT - && i++ < retry) + while ((cc = __builtin_tbegin_nofloat(__tdb)) == _HTM_TBEGIN_TRANSIENT + && i++ < __retry) __builtin_tx_assist(i); return cc; diff --git a/c_headers/htmxlintrin.h b/c_headers/htmxlintrin.h index 30f524d5d..16dc7056c 100644 --- a/c_headers/htmxlintrin.h +++ b/c_headers/htmxlintrin.h @@ -46,7 +46,7 @@ extern "C" { typedef char TM_buff_type[16]; -/* This macro can be used to determine whether a transaction was successfully +/* This macro can be used to determine whether a transaction was successfully started from the __TM_begin() and __TM_simple_begin() intrinsic functions below. 
*/ #define _HTM_TBEGIN_STARTED 1 @@ -62,18 +62,18 @@ __TM_simple_begin (void) extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_begin (void* const TM_buff) +__TM_begin (void* const __TM_buff) { - *_TEXASRL_PTR (TM_buff) = 0; + *_TEXASRL_PTR (__TM_buff) = 0; if (__builtin_expect (__builtin_tbegin (0), 1)) return _HTM_TBEGIN_STARTED; #ifdef __powerpc64__ - *_TEXASR_PTR (TM_buff) = __builtin_get_texasr (); + *_TEXASR_PTR (__TM_buff) = __builtin_get_texasr (); #else - *_TEXASRU_PTR (TM_buff) = __builtin_get_texasru (); - *_TEXASRL_PTR (TM_buff) = __builtin_get_texasr (); + *_TEXASRU_PTR (__TM_buff) = __builtin_get_texasru (); + *_TEXASRL_PTR (__TM_buff) = __builtin_get_texasr (); #endif - *_TFIAR_PTR (TM_buff) = __builtin_get_tfiar (); + *_TFIAR_PTR (__TM_buff) = __builtin_get_tfiar (); return 0; } @@ -95,9 +95,9 @@ __TM_abort (void) extern __inline void __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_named_abort (unsigned char const code) +__TM_named_abort (unsigned char const __code) { - __builtin_tabort (code); + __builtin_tabort (__code); } extern __inline void @@ -116,47 +116,47 @@ __TM_suspend (void) extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_is_user_abort (void* const TM_buff) +__TM_is_user_abort (void* const __TM_buff) { - texasru_t texasru = *_TEXASRU_PTR (TM_buff); + texasru_t texasru = *_TEXASRU_PTR (__TM_buff); return _TEXASRU_ABORT (texasru); } extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_is_named_user_abort (void* const TM_buff, unsigned char *code) +__TM_is_named_user_abort (void* const __TM_buff, unsigned char *__code) { - texasru_t texasru = *_TEXASRU_PTR (TM_buff); + texasru_t texasru = *_TEXASRU_PTR (__TM_buff); - *code = _TEXASRU_FAILURE_CODE (texasru); + *__code = _TEXASRU_FAILURE_CODE (texasru); return _TEXASRU_ABORT (texasru); } extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_is_illegal (void* const TM_buff) +__TM_is_illegal (void* const __TM_buff) { - texasru_t texasru = *_TEXASRU_PTR (TM_buff); + texasru_t texasru = *_TEXASRU_PTR (__TM_buff); return _TEXASRU_DISALLOWED (texasru); } extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_is_footprint_exceeded (void* const TM_buff) +__TM_is_footprint_exceeded (void* const __TM_buff) { - texasru_t texasru = *_TEXASRU_PTR (TM_buff); + texasru_t texasru = *_TEXASRU_PTR (__TM_buff); return _TEXASRU_FOOTPRINT_OVERFLOW (texasru); } extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_nesting_depth (void* const TM_buff) +__TM_nesting_depth (void* const __TM_buff) { texasrl_t texasrl; if (_HTM_STATE (__builtin_ttest ()) == _HTM_NONTRANSACTIONAL) { - texasrl = *_TEXASRL_PTR (TM_buff); + texasrl = *_TEXASRL_PTR (__TM_buff); if (!_TEXASR_FAILURE_SUMMARY (texasrl)) texasrl = 0; } @@ -168,15 +168,15 @@ __TM_nesting_depth (void* const TM_buff) extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_is_nested_too_deep(void* const TM_buff) +__TM_is_nested_too_deep(void* const __TM_buff) { - texasru_t texasru = *_TEXASRU_PTR (TM_buff); + texasru_t texasru = *_TEXASRU_PTR (__TM_buff); return _TEXASRU_NESTING_OVERFLOW (texasru); } extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_is_conflict(void* const TM_buff) +__TM_is_conflict(void* const __TM_buff) { 
texasru_t texasru = *_TEXASRU_PTR (TM_buff); /* Return TEXASR bits 11 (Self-Induced Conflict) through @@ -186,24 +186,24 @@ __TM_is_conflict(void* const TM_buff) extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_is_failure_persistent(void* const TM_buff) +__TM_is_failure_persistent(void* const __TM_buff) { - texasru_t texasru = *_TEXASRU_PTR (TM_buff); + texasru_t texasru = *_TEXASRU_PTR (__TM_buff); return _TEXASRU_FAILURE_PERSISTENT (texasru); } extern __inline long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_failure_address(void* const TM_buff) +__TM_failure_address(void* const __TM_buff) { - return *_TFIAR_PTR (TM_buff); + return *_TFIAR_PTR (__TM_buff); } extern __inline long long __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -__TM_failure_code(void* const TM_buff) +__TM_failure_code(void* const __TM_buff) { - return *_TEXASR_PTR (TM_buff); + return *_TEXASR_PTR (__TM_buff); } #ifdef __cplusplus @@ -227,9 +227,9 @@ __TM_simple_begin () } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_begin (void* const tdb) +__TM_begin (void* const __tdb) { - return __builtin_tbegin_nofloat (tdb); + return __builtin_tbegin_nofloat (__tdb); } static __inline long __attribute__((__always_inline__, __nodebug__)) @@ -245,22 +245,22 @@ __TM_abort () } static __inline void __attribute__((__always_inline__, __nodebug__)) -__TM_named_abort (unsigned char const code) +__TM_named_abort (unsigned char const __code) { - return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + code); + return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + __code); } static __inline void __attribute__((__always_inline__, __nodebug__)) -__TM_non_transactional_store (void* const addr, long long const value) +__TM_non_transactional_store (void* const __addr, long long const __value) { - __builtin_non_tx_store ((uint64_t*)addr, (uint64_t)value); + __builtin_non_tx_store ((uint64_t*)__addr, (uint64_t)__value); } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_nesting_depth (void* const tdb_ptr) +__TM_nesting_depth (void* const __tdb_ptr) { int depth = __builtin_tx_nesting_depth (); - struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; if (depth != 0) return depth; @@ -273,9 +273,9 @@ __TM_nesting_depth (void* const tdb_ptr) /* Transaction failure diagnostics */ static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_is_user_abort (void* const tdb_ptr) +__TM_is_user_abort (void* const __tdb_ptr) { - struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; if (tdb->format != 1) return 0; @@ -284,25 +284,25 @@ __TM_is_user_abort (void* const tdb_ptr) } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_is_named_user_abort (void* const tdb_ptr, unsigned char* code) +__TM_is_named_user_abort (void* const __tdb_ptr, unsigned char* __code) { - struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; if (tdb->format != 1) return 0; if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE) { - *code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE; + *__code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE; return 1; } return 0; } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_is_illegal (void* const tdb_ptr) +__TM_is_illegal (void* const __tdb_ptr) { - struct __htm_tdb *tdb 
= (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; return (tdb->format == 1 && (tdb->abort_code == 4 /* unfiltered program interruption */ @@ -310,9 +310,9 @@ __TM_is_illegal (void* const tdb_ptr) } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_is_footprint_exceeded (void* const tdb_ptr) +__TM_is_footprint_exceeded (void* const __tdb_ptr) { - struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; return (tdb->format == 1 && (tdb->abort_code == 7 /* fetch overflow */ @@ -320,17 +320,17 @@ __TM_is_footprint_exceeded (void* const tdb_ptr) } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_is_nested_too_deep (void* const tdb_ptr) +__TM_is_nested_too_deep (void* const __tdb_ptr) { - struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */ } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_is_conflict (void* const tdb_ptr) +__TM_is_conflict (void* const __tdb_ptr) { - struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; return (tdb->format == 1 && (tdb->abort_code == 9 /* fetch conflict */ @@ -338,22 +338,22 @@ __TM_is_conflict (void* const tdb_ptr) } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_is_failure_persistent (long const result) +__TM_is_failure_persistent (long const __result) { - return result == _HTM_TBEGIN_PERSISTENT; + return __result == _HTM_TBEGIN_PERSISTENT; } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_failure_address (void* const tdb_ptr) +__TM_failure_address (void* const __tdb_ptr) { - struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; return tdb->atia; } static __inline long __attribute__((__always_inline__, __nodebug__)) -__TM_failure_code (void* const tdb_ptr) +__TM_failure_code (void* const __tdb_ptr) { - struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + struct __htm_tdb *tdb = (struct __htm_tdb*)__tdb_ptr; return tdb->abort_code; } diff --git a/c_headers/ia32intrin.h b/c_headers/ia32intrin.h index 5adf3f1f5..492830010 100644 --- a/c_headers/ia32intrin.h +++ b/c_headers/ia32intrin.h @@ -32,50 +32,26 @@ static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__)) __readeflags(void) { - unsigned long long __res = 0; - __asm__ __volatile__ ("pushf\n\t" - "popq %0\n" - :"=r"(__res) - : - : - ); - return __res; + return __builtin_ia32_readeflags_u64(); } static __inline__ void __attribute__((__always_inline__, __nodebug__)) __writeeflags(unsigned long long __f) { - __asm__ __volatile__ ("pushq %0\n\t" - "popf\n" - : - :"r"(__f) - :"flags" - ); + __builtin_ia32_writeeflags_u64(__f); } #else /* !__x86_64__ */ static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) __readeflags(void) { - unsigned int __res = 0; - __asm__ __volatile__ ("pushf\n\t" - "popl %0\n" - :"=r"(__res) - : - : - ); - return __res; + return __builtin_ia32_readeflags_u32(); } static __inline__ void __attribute__((__always_inline__, __nodebug__)) __writeeflags(unsigned int __f) { - __asm__ __volatile__ ("pushl %0\n\t" - "popf\n" - : - :"r"(__f) - :"flags" - ); + __builtin_ia32_writeeflags_u32(__f); } #endif /* !__x86_64__ */ @@ -84,12 +60,6 @@ __rdpmc(int __A) { return __builtin_ia32_rdpmc(__A); } -/* 
__rdtsc */ -static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__)) -__rdtsc(void) { - return __builtin_ia32_rdtsc(); -} - /* __rdtscp */ static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__)) __rdtscp(unsigned int *__A) { @@ -98,4 +68,6 @@ __rdtscp(unsigned int *__A) { #define _rdtsc() __rdtsc() +#define _rdpmc(A) __rdpmc(A) + #endif /* __IA32INTRIN_H */ diff --git a/c_headers/immintrin.h b/c_headers/immintrin.h index 21ad3281f..7f91d49fb 100644 --- a/c_headers/immintrin.h +++ b/c_headers/immintrin.h @@ -24,105 +24,204 @@ #ifndef __IMMINTRIN_H #define __IMMINTRIN_H -#ifdef __MMX__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__) #include #endif -#ifdef __SSE__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__) #include #endif -#ifdef __SSE2__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__) #include #endif -#ifdef __SSE3__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__) #include #endif -#ifdef __SSSE3__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__) #include #endif -#if defined (__SSE4_2__) || defined (__SSE4_1__) +#if !defined(_MSC_VER) || __has_feature(modules) || \ + (defined(__SSE4_2__) || defined(__SSE4_1__)) #include #endif -#if defined (__AES__) || defined (__PCLMUL__) +#if !defined(_MSC_VER) || __has_feature(modules) || \ + (defined(__AES__) || defined(__PCLMUL__)) #include #endif -#ifdef __AVX__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__) #include #endif -#ifdef __AVX2__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__) #include -#endif -#ifdef __BMI__ +/* The 256-bit versions of functions in f16cintrin.h. + Intel documents these as being in immintrin.h, and + they depend on typedefs from avxintrin.h. */ + +/// \brief Converts a 256-bit vector of [8 x float] into a 128-bit vector +/// containing 16-bit half-precision float values. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_cvtps_ph(__m256 a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VCVTPS2PH instruction. +/// +/// \param a +/// A 256-bit vector containing 32-bit single-precision float values to be +/// converted to 16-bit half-precision float values. +/// \param imm +/// An immediate value controlling rounding using bits [2:0]: \n +/// 000: Nearest \n +/// 001: Down \n +/// 010: Up \n +/// 011: Truncate \n +/// 1XX: Use MXCSR.RC for rounding +/// \returns A 128-bit vector containing the converted 16-bit half-precision +/// float values. +#define _mm256_cvtps_ph(a, imm) __extension__ ({ \ + (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)); }) + +/// \brief Converts a 128-bit vector containing 16-bit half-precision float +/// values into a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPH2PS instruction. +/// +/// \param __a +/// A 128-bit vector containing 16-bit half-precision float values to be +/// converted to 32-bit single-precision float values. +/// \returns A vector of [8 x float] containing the converted 32-bit +/// single-precision float values. 
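/* A minimal usage sketch of the 256-bit F16C pair documented above,
 * compressing eight floats to half precision and widening them back; the
 * helper name is illustrative only, and AVX plus F16C are assumed to be
 * enabled when compiling (e.g. -mavx -mf16c):
 *
 *   #include <immintrin.h>
 *
 *   __m256 roundtrip_f16(__m256 v)
 *   {
 *       // imm = 0 selects round-to-nearest, per the rounding table above;
 *       // the eight 16-bit halves fill the whole 128-bit result.
 *       __m128i halves = _mm256_cvtps_ph(v, 0);
 *       return _mm256_cvtph_ps(halves);
 *   }
 */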
+static __inline __m256 __attribute__((__always_inline__, __nodebug__, __target__("f16c"))) +_mm256_cvtph_ps(__m128i __a) +{ + return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a); +} +#endif /* __AVX2__ */ + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) #include #endif -#ifdef __BMI2__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__) #include #endif -#ifdef __LZCNT__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__) #include #endif -#ifdef __FMA__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__) #include #endif -#ifdef __AVX512F__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__) #include #endif -#ifdef __AVX512VL__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__) #include #endif -#ifdef __AVX512BW__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__) #include #endif -#ifdef __AVX512CD__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__) #include #endif -#ifdef __AVX512DQ__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__) #include #endif -#if defined (__AVX512VL__) && defined (__AVX512BW__) +#if !defined(_MSC_VER) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512BW__)) #include #endif -#if defined (__AVX512VL__) && defined (__AVX512DQ__) +#if !defined(_MSC_VER) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512CD__)) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512DQ__)) #include #endif -#ifdef __AVX512ER__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__) #include #endif -#ifdef __RDRND__ -static __inline__ int __attribute__((__always_inline__, __nodebug__)) +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || \ + (defined(__AVX512IFMA__) && defined(__AVX512VL__)) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || \ + (defined(__AVX512VBMI__) && defined(__AVX512VL__)) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__) +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) _rdrand16_step(unsigned short *__p) { return __builtin_ia32_rdrand16_step(__p); } -static __inline__ int __attribute__((__always_inline__, __nodebug__)) +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) _rdrand32_step(unsigned int *__p) { return __builtin_ia32_rdrand32_step(__p); } -#ifdef __x86_64__ +/* __bit_scan_forward */ static __inline__ int __attribute__((__always_inline__, __nodebug__)) +_bit_scan_forward(int __A) { + return __builtin_ctz(__A); +} + +/* __bit_scan_reverse */ +static __inline__ int __attribute__((__always_inline__, __nodebug__)) +_bit_scan_reverse(int __A) { + return 31 - __builtin_clz(__A); +} + +#ifdef __x86_64__ +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) _rdrand64_step(unsigned long long *__p) { return __builtin_ia32_rdrand64_step(__p); @@ -130,71 +229,87 @@ 
_rdrand64_step(unsigned long long *__p) #endif #endif /* __RDRND__ */ -#ifdef __FSGSBASE__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__) #ifdef __x86_64__ -static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _readfsbase_u32(void) { return __builtin_ia32_rdfsbase32(); } -static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _readfsbase_u64(void) { return __builtin_ia32_rdfsbase64(); } -static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _readgsbase_u32(void) { return __builtin_ia32_rdgsbase32(); } -static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _readgsbase_u64(void) { return __builtin_ia32_rdgsbase64(); } -static __inline__ void __attribute__((__always_inline__, __nodebug__)) +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _writefsbase_u32(unsigned int __V) { return __builtin_ia32_wrfsbase32(__V); } -static __inline__ void __attribute__((__always_inline__, __nodebug__)) +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _writefsbase_u64(unsigned long long __V) { return __builtin_ia32_wrfsbase64(__V); } -static __inline__ void __attribute__((__always_inline__, __nodebug__)) +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _writegsbase_u32(unsigned int __V) { return __builtin_ia32_wrgsbase32(__V); } -static __inline__ void __attribute__((__always_inline__, __nodebug__)) +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _writegsbase_u64(unsigned long long __V) { return __builtin_ia32_wrgsbase64(__V); } + #endif #endif /* __FSGSBASE__ */ -#ifdef __RTM__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__) #include -#endif - -#ifdef __RTM__ #include #endif -#ifdef __SHA__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__) #include #endif +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__) #include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVE__) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__) +#include +#endif + +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__) +#include +#endif /* Some intrinsics inside adxintrin.h are available only on processors with ADX, * whereas others are also available at all times. 
*/ diff --git a/c_headers/Intrin.h b/c_headers/intrin.h similarity index 58% rename from c_headers/Intrin.h rename to c_headers/intrin.h index 24b3eae8b..a35262af8 100644 --- a/c_headers/Intrin.h +++ b/c_headers/intrin.h @@ -1,4 +1,4 @@ -/* ===-------- Intrin.h ---------------------------------------------------=== +/* ===-------- intrin.h ---------------------------------------------------=== * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -23,7 +23,7 @@ /* Only include this if we're compiling for the windows platform. */ #ifndef _MSC_VER -#include_next +#include_next #else #ifndef __INTRIN_H @@ -34,6 +34,10 @@ #include #endif +#if defined(__arm__) +#include +#endif + /* For the definition of jmp_buf. */ #if __STDC_HOSTED__ #include @@ -49,10 +53,7 @@ extern "C" { #if defined(__MMX__) /* And the random ones that aren't in those files. */ __m64 _m_from_float(float); -__m64 _m_from_int(int _l); -void _m_prefetch(void *); float _m_to_float(__m64); -int _m_to_int(__m64 _M); #endif /* Other assorted instruction intrinsics. */ @@ -64,8 +65,9 @@ static __inline__ void __cpuid(int[4], int); static __inline__ void __cpuidex(int[4], int, int); -void __debugbreak(void); +static __inline__ __int64 __emul(int, int); +static __inline__ unsigned __int64 __emulu(unsigned int, unsigned int); void __cdecl __fastfail(unsigned int); unsigned int __getcallerseflags(void); @@ -96,6 +98,7 @@ static __inline__ void __movsd(unsigned long *, unsigned long const *, size_t); static __inline__ void __movsw(unsigned short *, unsigned short const *, size_t); +static __inline__ void __nop(void); void __nvreg_restore_fence(void); void __nvreg_save_fence(void); @@ -105,10 +108,6 @@ void __outdword(unsigned short, unsigned long); void __outdwordstring(unsigned short, unsigned long *, unsigned long); void __outword(unsigned short, unsigned short); void __outwordstring(unsigned short, unsigned short *, unsigned long); -static __inline__ -unsigned int __popcnt(unsigned int); -static __inline__ -unsigned short __popcnt16(unsigned short); unsigned long __readcr0(void); unsigned long __readcr2(void); static __inline__ @@ -120,8 +119,6 @@ unsigned int __readdr(unsigned int); static __inline__ unsigned char __readfsbyte(unsigned long); static __inline__ -unsigned long __readfsdword(unsigned long); -static __inline__ unsigned __int64 __readfsqword(unsigned long); static __inline__ unsigned short __readfsword(unsigned long); @@ -175,107 +172,34 @@ static __inline__ unsigned char _bittestandreset(long *, long); static __inline__ unsigned char _bittestandset(long *, long); -unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64); -unsigned long __cdecl _byteswap_ulong(unsigned long); -unsigned short __cdecl _byteswap_ushort(unsigned short); void __cdecl _disable(void); void __cdecl _enable(void); long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value); -static __inline__ -long _InterlockedAnd(long volatile *_Value, long _Mask); -static __inline__ -short _InterlockedAnd16(short volatile *_Value, short _Mask); -static __inline__ -char _InterlockedAnd8(char volatile *_Value, char _Mask); unsigned char _interlockedbittestandreset(long volatile *, long); static __inline__ unsigned char _interlockedbittestandset(long volatile *, long); -static __inline__ -long __cdecl _InterlockedCompareExchange(long volatile *_Destination, - long _Exchange, long _Comparand); long 
_InterlockedCompareExchange_HLEAcquire(long volatile *, long, long); long _InterlockedCompareExchange_HLERelease(long volatile *, long, long); -static __inline__ -short _InterlockedCompareExchange16(short volatile *_Destination, - short _Exchange, short _Comparand); -static __inline__ -__int64 _InterlockedCompareExchange64(__int64 volatile *_Destination, - __int64 _Exchange, __int64 _Comparand); __int64 _InterlockedcompareExchange64_HLEAcquire(__int64 volatile *, __int64, __int64); __int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64, __int64); -static __inline__ -char _InterlockedCompareExchange8(char volatile *_Destination, char _Exchange, - char _Comparand); void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *, void *); void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *, void *); -static __inline__ -long __cdecl _InterlockedDecrement(long volatile *_Addend); -static __inline__ -short _InterlockedDecrement16(short volatile *_Addend); -long _InterlockedExchange(long volatile *_Target, long _Value); -static __inline__ -short _InterlockedExchange16(short volatile *_Target, short _Value); -static __inline__ -char _InterlockedExchange8(char volatile *_Target, char _Value); -static __inline__ -long __cdecl _InterlockedExchangeAdd(long volatile *_Addend, long _Value); long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long); long _InterlockedExchangeAdd_HLERelease(long volatile *, long); -static __inline__ -short _InterlockedExchangeAdd16(short volatile *_Addend, short _Value); __int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64); __int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64); -static __inline__ -char _InterlockedExchangeAdd8(char volatile *_Addend, char _Value); -static __inline__ -long __cdecl _InterlockedIncrement(long volatile *_Addend); -static __inline__ -short _InterlockedIncrement16(short volatile *_Addend); -static __inline__ -long _InterlockedOr(long volatile *_Value, long _Mask); -static __inline__ -short _InterlockedOr16(short volatile *_Value, short _Mask); -static __inline__ -char _InterlockedOr8(char volatile *_Value, char _Mask); -static __inline__ -long _InterlockedXor(long volatile *_Value, long _Mask); -static __inline__ -short _InterlockedXor16(short volatile *_Value, short _Mask); -static __inline__ -char _InterlockedXor8(char volatile *_Value, char _Mask); void __cdecl _invpcid(unsigned int, void *); -static __inline__ -unsigned long __cdecl _lrotl(unsigned long, int); -static __inline__ -unsigned long __cdecl _lrotr(unsigned long, int); -static __inline__ -static __inline__ -void _ReadBarrier(void); -static __inline__ -void _ReadWriteBarrier(void); -static __inline__ -void *_ReturnAddress(void); +static __inline__ void +__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) +_ReadBarrier(void); +static __inline__ void +__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) +_ReadWriteBarrier(void); unsigned int _rorx_u32(unsigned int, const unsigned int); -static __inline__ -unsigned int __cdecl _rotl(unsigned int _Value, int _Shift); -static __inline__ -unsigned short _rotl16(unsigned short _Value, unsigned char _Shift); -static __inline__ -unsigned __int64 __cdecl _rotl64(unsigned __int64 _Value, int _Shift); -static __inline__ -unsigned char _rotl8(unsigned char _Value, unsigned char _Shift); -static __inline__ -unsigned int __cdecl _rotr(unsigned int _Value, int _Shift); -static __inline__ 
-unsigned short _rotr16(unsigned short _Value, unsigned char _Shift); -static __inline__ -unsigned __int64 __cdecl _rotr64(unsigned __int64 _Value, int _Shift); -static __inline__ -unsigned char _rotr8(unsigned char _Value, unsigned char _Shift); int _sarx_i32(int, unsigned int); #if __STDC_HOSTED__ int __cdecl _setjmp(jmp_buf); @@ -285,16 +209,14 @@ unsigned int _shrx_u32(unsigned int, unsigned int); void _Store_HLERelease(long volatile *, long); void _Store64_HLERelease(__int64 volatile *, __int64); void _StorePointer_HLERelease(void *volatile *, void *); -static __inline__ -void _WriteBarrier(void); +static __inline__ void +__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) +_WriteBarrier(void); unsigned __int32 xbegin(void); void _xend(void); static __inline__ #define _XCR_XFEATURE_ENABLED_MASK 0 unsigned __int64 __cdecl _xgetbv(unsigned int); -void __cdecl _xrstor(void const *, unsigned __int64); -void __cdecl _xsave(void *, unsigned __int64); -void __cdecl _xsaveopt(void *, unsigned __int64); void __cdecl _xsetbv(unsigned int, unsigned __int64); /* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */ @@ -314,9 +236,6 @@ void __lwpval64(unsigned __int64, unsigned int, unsigned int); unsigned __int64 __lzcnt64(unsigned __int64); static __inline__ void __movsq(unsigned long long *, unsigned long long const *, size_t); -__int64 __mulh(__int64, __int64); -static __inline__ -unsigned __int64 __popcnt64(unsigned __int64); static __inline__ unsigned char __readgsbyte(unsigned long); static __inline__ @@ -355,7 +274,6 @@ static __inline__ unsigned char _bittestandreset64(__int64 *, __int64); static __inline__ unsigned char _bittestandset64(__int64 *, __int64); -unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64); long _InterlockedAnd_np(long volatile *_Value, long _Mask); short _InterlockedAnd16_np(short volatile *_Value, short _Mask); __int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask); @@ -381,146 +299,58 @@ __int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64, __int64); __int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination, __int64 _Exchange, __int64 _Comparand); -void *_InterlockedCompareExchangePointer(void *volatile *_Destination, - void *_Exchange, void *_Comparand); void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination, void *_Exchange, void *_Comparand); +long _InterlockedOr_np(long volatile *_Value, long _Mask); +short _InterlockedOr16_np(short volatile *_Value, short _Mask); +__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask); +char _InterlockedOr8_np(char volatile *_Value, char _Mask); +long _InterlockedXor_np(long volatile *_Value, long _Mask); +short _InterlockedXor16_np(short volatile *_Value, short _Mask); +__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask); +char _InterlockedXor8_np(char volatile *_Value, char _Mask); +unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int); +__int64 _sarx_i64(__int64, unsigned int); +unsigned __int64 _shlx_u64(unsigned __int64, unsigned int); +unsigned __int64 _shrx_u64(unsigned __int64, unsigned int); +static __inline__ +__int64 __mulh(__int64, __int64); +static __inline__ +unsigned __int64 __umulh(unsigned __int64, unsigned __int64); +static __inline__ +__int64 _mul128(__int64, __int64, __int64*); +static __inline__ +unsigned __int64 _umul128(unsigned __int64, + unsigned __int64, + unsigned __int64*); + +#endif /* __x86_64__ */ + +#if defined(__x86_64__) || 
defined(__arm__) + static __inline__ __int64 _InterlockedDecrement64(__int64 volatile *_Addend); static __inline__ __int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value); static __inline__ __int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value); -void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value); +static __inline__ +__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value); static __inline__ __int64 _InterlockedIncrement64(__int64 volatile *_Addend); -long _InterlockedOr_np(long volatile *_Value, long _Mask); -short _InterlockedOr16_np(short volatile *_Value, short _Mask); static __inline__ __int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask); -char _InterlockedOr8_np(char volatile *_Value, char _Mask); -long _InterlockedXor_np(long volatile *_Value, long _Mask); -short _InterlockedXor16_np(short volatile *_Value, short _Mask); static __inline__ __int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask); -char _InterlockedXor8_np(char volatile *_Value, char _Mask); static __inline__ -__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand, - __int64 *_HighProduct); -unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int); -__int64 _sarx_i64(__int64, unsigned int); -#if __STDC_HOSTED__ -int __cdecl _setjmpex(jmp_buf); +__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask); + #endif -unsigned __int64 _shlx_u64(unsigned __int64, unsigned int); -unsigned __int64 _shrx_u64(unsigned __int64, unsigned int); -/* - * Multiply two 64-bit integers and obtain a 64-bit result. - * The low-half is returned directly and the high half is in an out parameter. - */ -static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS -_umul128(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand, - unsigned __int64 *_HighProduct) { - unsigned __int128 _FullProduct = - (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand; - *_HighProduct = _FullProduct >> 64; - return _FullProduct; -} -static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS -__umulh(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand) { - unsigned __int128 _FullProduct = - (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand; - return _FullProduct >> 64; -} -void __cdecl _xrstor64(void const *, unsigned __int64); -void __cdecl _xsave64(void *, unsigned __int64); -void __cdecl _xsaveopt64(void *, unsigned __int64); -#endif /* __x86_64__ */ - -/*----------------------------------------------------------------------------*\ -|* Bit Twiddling -\*----------------------------------------------------------------------------*/ -static __inline__ unsigned char __DEFAULT_FN_ATTRS -_rotl8(unsigned char _Value, unsigned char _Shift) { - _Shift &= 0x7; - return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value; -} -static __inline__ unsigned char __DEFAULT_FN_ATTRS -_rotr8(unsigned char _Value, unsigned char _Shift) { - _Shift &= 0x7; - return _Shift ? (_Value >> _Shift) | (_Value << (8 - _Shift)) : _Value; -} -static __inline__ unsigned short __DEFAULT_FN_ATTRS -_rotl16(unsigned short _Value, unsigned char _Shift) { - _Shift &= 0xf; - return _Shift ? 
(_Value << _Shift) | (_Value >> (16 - _Shift)) : _Value; -} -static __inline__ unsigned short __DEFAULT_FN_ATTRS -_rotr16(unsigned short _Value, unsigned char _Shift) { - _Shift &= 0xf; - return _Shift ? (_Value >> _Shift) | (_Value << (16 - _Shift)) : _Value; -} -static __inline__ unsigned int __DEFAULT_FN_ATTRS -_rotl(unsigned int _Value, int _Shift) { - _Shift &= 0x1f; - return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value; -} -static __inline__ unsigned int __DEFAULT_FN_ATTRS -_rotr(unsigned int _Value, int _Shift) { - _Shift &= 0x1f; - return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value; -} -static __inline__ unsigned long __DEFAULT_FN_ATTRS -_lrotl(unsigned long _Value, int _Shift) { - _Shift &= 0x1f; - return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value; -} -static __inline__ unsigned long __DEFAULT_FN_ATTRS -_lrotr(unsigned long _Value, int _Shift) { - _Shift &= 0x1f; - return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value; -} -static -__inline__ unsigned __int64 __DEFAULT_FN_ATTRS -_rotl64(unsigned __int64 _Value, int _Shift) { - _Shift &= 0x3f; - return _Shift ? (_Value << _Shift) | (_Value >> (64 - _Shift)) : _Value; -} -static -__inline__ unsigned __int64 __DEFAULT_FN_ATTRS -_rotr64(unsigned __int64 _Value, int _Shift) { - _Shift &= 0x3f; - return _Shift ? (_Value >> _Shift) | (_Value << (64 - _Shift)) : _Value; -} /*----------------------------------------------------------------------------*\ |* Bit Counting and Testing \*----------------------------------------------------------------------------*/ static __inline__ unsigned char __DEFAULT_FN_ATTRS -_BitScanForward(unsigned long *_Index, unsigned long _Mask) { - if (!_Mask) - return 0; - *_Index = __builtin_ctzl(_Mask); - return 1; -} -static __inline__ unsigned char __DEFAULT_FN_ATTRS -_BitScanReverse(unsigned long *_Index, unsigned long _Mask) { - if (!_Mask) - return 0; - *_Index = 31 - __builtin_clzl(_Mask); - return 1; -} -static __inline__ unsigned short __DEFAULT_FN_ATTRS -__popcnt16(unsigned short _Value) { - return __builtin_popcount((int)_Value); -} -static __inline__ unsigned int __DEFAULT_FN_ATTRS -__popcnt(unsigned int _Value) { - return __builtin_popcount(_Value); -} -static __inline__ unsigned char __DEFAULT_FN_ATTRS _bittest(long const *_BitBase, long _BitPos) { return (*_BitBase >> _BitPos) & 1; } @@ -547,27 +377,25 @@ _interlockedbittestandset(long volatile *_BitBase, long _BitPos) { long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_SEQ_CST); return (_PrevVal >> _BitPos) & 1; } +#if defined(__arm__) || defined(__aarch64__) +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_interlockedbittestandset_acq(long volatile *_BitBase, long _BitPos) { + long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_ACQUIRE); + return (_PrevVal >> _BitPos) & 1; +} +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_interlockedbittestandset_nf(long volatile *_BitBase, long _BitPos) { + long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_RELAXED); + return (_PrevVal >> _BitPos) & 1; +} +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_interlockedbittestandset_rel(long volatile *_BitBase, long _BitPos) { + long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_RELEASE); + return (_PrevVal >> _BitPos) & 1; +} +#endif #ifdef __x86_64__ static __inline__ unsigned char __DEFAULT_FN_ATTRS -_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) { - if (!_Mask) - return 0; - *_Index 
= __builtin_ctzll(_Mask); - return 1; -} -static __inline__ unsigned char __DEFAULT_FN_ATTRS -_BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) { - if (!_Mask) - return 0; - *_Index = 63 - __builtin_clzll(_Mask); - return 1; -} -static __inline__ -unsigned __int64 __DEFAULT_FN_ATTRS -__popcnt64(unsigned __int64 _Value) { - return __builtin_popcountll(_Value); -} -static __inline__ unsigned char __DEFAULT_FN_ATTRS _bittest64(__int64 const *_BitBase, __int64 _BitPos) { return (*_BitBase >> _BitPos) & 1; } @@ -599,198 +427,449 @@ _interlockedbittestandset64(__int64 volatile *_BitBase, __int64 _BitPos) { /*----------------------------------------------------------------------------*\ |* Interlocked Exchange Add \*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) static __inline__ char __DEFAULT_FN_ATTRS -_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) { - return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST); +_InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE); +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED); +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED); } static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) { - return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST); -} -#ifdef __x86_64__ -static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) { - return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST); -} -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Exchange Sub -\*----------------------------------------------------------------------------*/ -static __inline__ char __DEFAULT_FN_ATTRS -_InterlockedExchangeSub8(char volatile *_Subend, char _Value) { - return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST); +_InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE); } static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedExchangeSub16(short volatile *_Subend, short _Value) { - return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST); +_InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE); } static __inline__ long __DEFAULT_FN_ATTRS -_InterlockedExchangeSub(long volatile *_Subend, long _Value) { - return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST); +_InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value) { + return __atomic_fetch_add(_Addend, _Value, 
__ATOMIC_RELEASE); } -#ifdef __x86_64__ static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) { - return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST); +_InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value) { + return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE); } #endif /*----------------------------------------------------------------------------*\ |* Interlocked Increment \*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedIncrement16(short volatile *_Value) { - return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST); +_InterlockedIncrement16_acq(short volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedIncrement16_nf(short volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedIncrement16_rel(short volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedIncrement_acq(long volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedIncrement_nf(long volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedIncrement_rel(long volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE); } -#ifdef __x86_64__ static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedIncrement64(__int64 volatile *_Value) { - return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST); +_InterlockedIncrement64_acq(__int64 volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedIncrement64_nf(__int64 volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedIncrement64_rel(__int64 volatile *_Value) { + return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE); } #endif /*----------------------------------------------------------------------------*\ |* Interlocked Decrement \*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedDecrement16(short volatile *_Value) { - return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST); +_InterlockedDecrement16_acq(short volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedDecrement16_nf(short volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedDecrement16_rel(short volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedDecrement_acq(long 
volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedDecrement_nf(long volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedDecrement_rel(long volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE); } -#ifdef __x86_64__ static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedDecrement64(__int64 volatile *_Value) { - return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST); +_InterlockedDecrement64_acq(__int64 volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedDecrement64_nf(__int64 volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedDecrement64_rel(__int64 volatile *_Value) { + return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE); } #endif /*----------------------------------------------------------------------------*\ |* Interlocked And \*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) static __inline__ char __DEFAULT_FN_ATTRS -_InterlockedAnd8(char volatile *_Value, char _Mask) { - return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedAnd8_acq(char volatile *_Value, char _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedAnd8_nf(char volatile *_Value, char _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedAnd8_rel(char volatile *_Value, char _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE); } static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedAnd16(short volatile *_Value, short _Mask) { - return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedAnd16_acq(short volatile *_Value, short _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedAnd16_nf(short volatile *_Value, short _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedAnd16_rel(short volatile *_Value, short _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE); } static __inline__ long __DEFAULT_FN_ATTRS -_InterlockedAnd(long volatile *_Value, long _Mask) { - return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedAnd_acq(long volatile *_Value, long _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedAnd_nf(long volatile *_Value, long _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedAnd_rel(long volatile *_Value, long _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE); } -#ifdef __x86_64__ static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) { - return __atomic_and_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask) { + return 
__atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask) { + return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE); } #endif /*----------------------------------------------------------------------------*\ |* Interlocked Or \*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) static __inline__ char __DEFAULT_FN_ATTRS -_InterlockedOr8(char volatile *_Value, char _Mask) { - return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedOr8_acq(char volatile *_Value, char _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedOr8_nf(char volatile *_Value, char _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedOr8_rel(char volatile *_Value, char _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE); } static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedOr16(short volatile *_Value, short _Mask) { - return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedOr16_acq(short volatile *_Value, short _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedOr16_nf(short volatile *_Value, short _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedOr16_rel(short volatile *_Value, short _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE); } static __inline__ long __DEFAULT_FN_ATTRS -_InterlockedOr(long volatile *_Value, long _Mask) { - return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedOr_acq(long volatile *_Value, long _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedOr_nf(long volatile *_Value, long _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedOr_rel(long volatile *_Value, long _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE); } -#ifdef __x86_64__ static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) { - return __atomic_or_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask) { + return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE); } #endif /*----------------------------------------------------------------------------*\ |* Interlocked Xor \*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) static __inline__ char __DEFAULT_FN_ATTRS -_InterlockedXor8(char volatile *_Value, char _Mask) { - return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedXor8_acq(char volatile *_Value, char _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedXor8_nf(char volatile 
*_Value, char _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedXor8_rel(char volatile *_Value, char _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE); } static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedXor16(short volatile *_Value, short _Mask) { - return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedXor16_acq(short volatile *_Value, short _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedXor16_nf(short volatile *_Value, short _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedXor16_rel(short volatile *_Value, short _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE); } static __inline__ long __DEFAULT_FN_ATTRS -_InterlockedXor(long volatile *_Value, long _Mask) { - return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedXor_acq(long volatile *_Value, long _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedXor_nf(long volatile *_Value, long _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedXor_rel(long volatile *_Value, long _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE); } -#ifdef __x86_64__ static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) { - return __atomic_xor_fetch(_Value, _Mask, __ATOMIC_SEQ_CST); +_InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED); +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask) { + return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE); } #endif /*----------------------------------------------------------------------------*\ |* Interlocked Exchange \*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) static __inline__ char __DEFAULT_FN_ATTRS -_InterlockedExchange8(char volatile *_Target, char _Value) { - __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST); +_InterlockedExchange8_acq(char volatile *_Target, char _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE); + return _Value; +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedExchange8_nf(char volatile *_Target, char _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED); + return _Value; +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedExchange8_rel(char volatile *_Target, char _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE); return _Value; } static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedExchange16(short volatile *_Target, short _Value) { - __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST); +_InterlockedExchange16_acq(short volatile *_Target, short _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE); + return _Value; +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedExchange16_nf(short volatile *_Target, short _Value) { 
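/*
 * Illustrative usage sketch (not part of the vendored header diff): on an ARM
 * or AArch64 target, the _acq/_rel/_nf suffixed variants declared in this
 * region expose explicit memory ordering. A minimal spinlock built on the
 * long-sized exchange variants might look like the sketch below; the lock
 * variable and function names are assumptions for illustration only.
 *
 *   static long volatile g_lock;              // hypothetical lock word
 *
 *   static void example_lock(void) {
 *     // acquire ordering: later loads/stores may not move above the exchange
 *     while (_InterlockedExchange_acq(&g_lock, 1) != 0) {
 *       // spin until the previous owner has written 0
 *     }
 *   }
 *
 *   static void example_unlock(void) {
 *     // release ordering: earlier loads/stores may not move below the store
 *     _InterlockedExchange_rel(&g_lock, 0);
 *   }
 */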
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED); + return _Value; +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedExchange16_rel(short volatile *_Target, short _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE); + return _Value; +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedExchange_acq(long volatile *_Target, long _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE); + return _Value; +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedExchange_nf(long volatile *_Target, long _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED); + return _Value; +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedExchange_rel(long volatile *_Target, long _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE); return _Value; } -#ifdef __x86_64__ static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) { - __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST); +_InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE); + return _Value; +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED); + return _Value; +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value) { + __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE); return _Value; } #endif /*----------------------------------------------------------------------------*\ |* Interlocked Compare Exchange \*----------------------------------------------------------------------------*/ +#if defined(__arm__) || defined(__aarch64__) static __inline__ char __DEFAULT_FN_ATTRS -_InterlockedCompareExchange8(char volatile *_Destination, +_InterlockedCompareExchange8_acq(char volatile *_Destination, char _Exchange, char _Comparand) { __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, - __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); + return _Comparand; +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedCompareExchange8_nf(char volatile *_Destination, + char _Exchange, char _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); + return _Comparand; +} +static __inline__ char __DEFAULT_FN_ATTRS +_InterlockedCompareExchange8_rel(char volatile *_Destination, + char _Exchange, char _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_RELEASE); return _Comparand; } static __inline__ short __DEFAULT_FN_ATTRS -_InterlockedCompareExchange16(short volatile *_Destination, +_InterlockedCompareExchange16_acq(short volatile *_Destination, short _Exchange, short _Comparand) { __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, - __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); + return _Comparand; +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedCompareExchange16_nf(short volatile *_Destination, + short _Exchange, short _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); + return _Comparand; +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedCompareExchange16_rel(short volatile 
*_Destination, + short _Exchange, short _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_RELEASE); + return _Comparand; +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedCompareExchange_acq(long volatile *_Destination, + long _Exchange, long _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); + return _Comparand; +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedCompareExchange_nf(long volatile *_Destination, + long _Exchange, long _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); + return _Comparand; +} +static __inline__ short __DEFAULT_FN_ATTRS +_InterlockedCompareExchange_rel(long volatile *_Destination, + long _Exchange, long _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_RELEASE); return _Comparand; } static __inline__ __int64 __DEFAULT_FN_ATTRS -_InterlockedCompareExchange64(__int64 volatile *_Destination, +_InterlockedCompareExchange64_acq(__int64 volatile *_Destination, __int64 _Exchange, __int64 _Comparand) { __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, - __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); return _Comparand; } -/*----------------------------------------------------------------------------*\ -|* Barriers -\*----------------------------------------------------------------------------*/ -#if defined(__i386__) || defined(__x86_64__) -static __inline__ void __DEFAULT_FN_ATTRS -__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) -_ReadWriteBarrier(void) { - __asm__ volatile ("" : : : "memory"); +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedCompareExchange64_nf(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); + return _Comparand; } -static __inline__ void __DEFAULT_FN_ATTRS -__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) -_ReadBarrier(void) { - __asm__ volatile ("" : : : "memory"); -} -static __inline__ void __DEFAULT_FN_ATTRS -__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) -_WriteBarrier(void) { - __asm__ volatile ("" : : : "memory"); -} -#endif -#ifdef __x86_64__ -static __inline__ void __DEFAULT_FN_ATTRS -__faststorefence(void) { - __asm__ volatile("lock orq $0, (%%rsp)" : : : "memory"); +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedCompareExchange64_rel(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand) { + __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, + __ATOMIC_SEQ_CST, __ATOMIC_RELEASE); + return _Comparand; } #endif /*----------------------------------------------------------------------------*\ @@ -807,20 +886,24 @@ static __inline__ unsigned char __DEFAULT_FN_ATTRS __readfsbyte(unsigned long __offset) { return *__ptr_to_addr_space(257, unsigned char, __offset); } -static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS -__readfsqword(unsigned long __offset) { - return *__ptr_to_addr_space(257, unsigned __int64, __offset); -} static __inline__ unsigned short __DEFAULT_FN_ATTRS __readfsword(unsigned long __offset) { return *__ptr_to_addr_space(257, unsigned short, __offset); } +static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS 
+__readfsqword(unsigned long __offset) { + return *__ptr_to_addr_space(257, unsigned __int64, __offset); +} #endif #ifdef __x86_64__ static __inline__ unsigned char __DEFAULT_FN_ATTRS __readgsbyte(unsigned long __offset) { return *__ptr_to_addr_space(256, unsigned char, __offset); } +static __inline__ unsigned short __DEFAULT_FN_ATTRS +__readgsword(unsigned long __offset) { + return *__ptr_to_addr_space(256, unsigned short, __offset); +} static __inline__ unsigned long __DEFAULT_FN_ATTRS __readgsdword(unsigned long __offset) { return *__ptr_to_addr_space(256, unsigned long, __offset); @@ -829,10 +912,6 @@ static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS __readgsqword(unsigned long __offset) { return *__ptr_to_addr_space(256, unsigned __int64, __offset); } -static __inline__ unsigned short __DEFAULT_FN_ATTRS -__readgsword(unsigned long __offset) { - return *__ptr_to_addr_space(256, unsigned short, __offset); -} #endif #undef __ptr_to_addr_space /*----------------------------------------------------------------------------*\ @@ -841,59 +920,39 @@ __readgsword(unsigned long __offset) { #if defined(__i386__) || defined(__x86_64__) static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) { - __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n) - : "%edi", "%esi", "%ecx"); + __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)); } static __inline__ void __DEFAULT_FN_ATTRS __movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) { - __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n) - : "%edi", "%esi", "%ecx"); + __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n)); } static __inline__ void __DEFAULT_FN_ATTRS __movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) { - __asm__("rep movsh" : : "D"(__dst), "S"(__src), "c"(__n) - : "%edi", "%esi", "%ecx"); -} -static __inline__ void __DEFAULT_FN_ATTRS -__stosb(unsigned char *__dst, unsigned char __x, size_t __n) { - __asm__("rep stosb" : : "D"(__dst), "a"(__x), "c"(__n) - : "%edi", "%ecx"); + __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n)); } static __inline__ void __DEFAULT_FN_ATTRS __stosd(unsigned long *__dst, unsigned long __x, size_t __n) { - __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n) - : "%edi", "%ecx"); + __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n)); } static __inline__ void __DEFAULT_FN_ATTRS __stosw(unsigned short *__dst, unsigned short __x, size_t __n) { - __asm__("rep stosh" : : "D"(__dst), "a"(__x), "c"(__n) - : "%edi", "%ecx"); + __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n)); } #endif #ifdef __x86_64__ static __inline__ void __DEFAULT_FN_ATTRS __movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) { - __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n) - : "%edi", "%esi", "%ecx"); + __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n)); } static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) { - __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n) - : "%edi", "%ecx"); + __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n)); } #endif /*----------------------------------------------------------------------------*\ |* Misc \*----------------------------------------------------------------------------*/ -static __inline__ void * __DEFAULT_FN_ATTRS -_AddressOfReturnAddress(void) { - return (void*)((char*)__builtin_frame_address(0) + sizeof(void*)); -} -static __inline__ void * __DEFAULT_FN_ATTRS 
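/*
 * Illustrative usage sketch (not part of the vendored header diff): on x86
 * targets the rep-string intrinsics behave like small typed memset/memcpy
 * loops. The buffer names, contents, and sizes below are assumptions for
 * illustration only.
 *
 *   unsigned long dwords[64];
 *   unsigned char src[256] = {1, 2, 3};
 *   unsigned char dst[256];
 *
 *   __stosd(dwords, 0xFFFFFFFFul, 64);   // rep stosl: store 64 dwords of ~0
 *   __movsb(dst, src, sizeof(dst));      // rep movsb: copy 256 bytes
 */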
-_ReturnAddress(void) { - return __builtin_return_address(0); -} #if defined(__i386__) || defined(__x86_64__) static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) { @@ -915,6 +974,10 @@ static __inline__ void __DEFAULT_FN_ATTRS __halt(void) { __asm__ volatile ("hlt"); } +static __inline__ void __DEFAULT_FN_ATTRS +__nop(void) { + __asm__ volatile ("nop"); +} #endif /*----------------------------------------------------------------------------*\ diff --git a/c_headers/inttypes.h b/c_headers/inttypes.h index 3d59d141d..1d8eabab0 100644 --- a/c_headers/inttypes.h +++ b/c_headers/inttypes.h @@ -23,6 +23,10 @@ #ifndef __CLANG_INTTYPES_H #define __CLANG_INTTYPES_H +#if defined(_MSC_VER) && _MSC_VER < 1800 +#error MSVC does not have inttypes.h prior to Visual Studio 2013 +#endif + #include_next #if defined(_MSC_VER) && _MSC_VER < 1900 diff --git a/c_headers/lzcntintrin.h b/c_headers/lzcntintrin.h index 8ee29975c..3d2769da3 100644 --- a/c_headers/lzcntintrin.h +++ b/c_headers/lzcntintrin.h @@ -25,28 +25,54 @@ #error "Never use directly; include instead." #endif -#ifndef __LZCNT__ -# error "LZCNT instruction is not enabled" -#endif /* __LZCNT__ */ - #ifndef __LZCNTINTRIN_H #define __LZCNTINTRIN_H /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt"))) +/// \brief Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 16-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 16-bit integer containing the number of leading zero +/// bits in the operand. static __inline__ unsigned short __DEFAULT_FN_ATTRS __lzcnt16(unsigned short __X) { return __X ? __builtin_clzs(__X) : 16; } +/// \brief Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 32-bit integer containing the number of leading zero +/// bits in the operand. static __inline__ unsigned int __DEFAULT_FN_ATTRS __lzcnt32(unsigned int __X) { return __X ? __builtin_clz(__X) : 32; } +/// \brief Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 32-bit integer containing the number of leading zero +/// bits in the operand. static __inline__ unsigned int __DEFAULT_FN_ATTRS _lzcnt_u32(unsigned int __X) { @@ -54,12 +80,32 @@ _lzcnt_u32(unsigned int __X) } #ifdef __x86_64__ +/// \brief Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 64-bit integer containing the number of leading zero +/// bits in the operand. static __inline__ unsigned long long __DEFAULT_FN_ATTRS __lzcnt64(unsigned long long __X) { return __X ? __builtin_clzll(__X) : 64; } +/// \brief Counts the number of leading zero bits in the operand. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 64-bit integer containing the number of leading zero +/// bits in the operand. static __inline__ unsigned long long __DEFAULT_FN_ATTRS _lzcnt_u64(unsigned long long __X) { diff --git a/c_headers/mm3dnow.h b/c_headers/mm3dnow.h index ac8e0f4af..294866c1d 100644 --- a/c_headers/mm3dnow.h +++ b/c_headers/mm3dnow.h @@ -30,10 +30,10 @@ typedef float __v2sf __attribute__((__vector_size__(8))); /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow"))) static __inline__ void __DEFAULT_FN_ATTRS -_m_femms() { +_m_femms(void) { __builtin_ia32_femms(); } @@ -132,6 +132,10 @@ _m_pmulhrw(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2); } +/* Handle the 3dnowa instructions here. */ +#undef __DEFAULT_FN_ATTRS +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa"))) + static __inline__ __m64 __DEFAULT_FN_ATTRS _m_pf2iw(__m64 __m) { return (__m64)__builtin_ia32_pf2iw((__v2sf)__m); diff --git a/c_headers/mmintrin.h b/c_headers/mmintrin.h index 0be5f32c7..e0c277a65 100644 --- a/c_headers/mmintrin.h +++ b/c_headers/mmintrin.h @@ -24,379 +24,1328 @@ #ifndef __MMINTRIN_H #define __MMINTRIN_H -#ifndef __MMX__ -#error "MMX instruction set not enabled" -#else - typedef long long __m64 __attribute__((__vector_size__(8))); +typedef long long __v1di __attribute__((__vector_size__(8))); typedef int __v2si __attribute__((__vector_size__(8))); typedef short __v4hi __attribute__((__vector_size__(8))); typedef char __v8qi __attribute__((__vector_size__(8))); /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx"))) +/// \brief Clears the MMX state by setting the state of the x87 stack registers +/// to empty. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the EMMS instruction. +/// static __inline__ void __DEFAULT_FN_ATTRS _mm_empty(void) { __builtin_ia32_emms(); } +/// \brief Constructs a 64-bit integer vector, setting the lower 32 bits to the +/// value of the 32-bit integer parameter and setting the upper 32 bits to 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __i +/// A 32-bit integer value. +/// \returns A 64-bit integer vector. The lower 32 bits contain the value of the +/// parameter. The upper 32 bits are set to 0. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtsi32_si64(int __i) { return (__m64)__builtin_ia32_vec_init_v2si(__i, 0); } +/// \brief Returns the lower 32 bits of a 64-bit integer vector as a 32-bit +/// signed integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __m +/// A 64-bit integer vector. +/// \returns A 32-bit signed integer value containing the lower 32 bits of the +/// parameter. static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi64_si32(__m64 __m) { return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0); } +/// \brief Casts a 64-bit signed integer value into a 64-bit integer vector. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVD instruction. +/// +/// \param __i +/// A 64-bit signed integer. +/// \returns A 64-bit integer vector containing the same bitwise pattern as the +/// parameter. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtsi64_m64(long long __i) { return (__m64)__i; } +/// \brief Casts a 64-bit integer vector into a 64-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVD instruction. +/// +/// \param __m +/// A 64-bit integer vector. +/// \returns A 64-bit signed integer containing the same bitwise pattern as the +/// parameter. static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtm64_si64(__m64 __m) { return (long long)__m; } +/// \brief Converts 16-bit signed integers from both 64-bit integer vector +/// parameters of [4 x i16] into 8-bit signed integer values, and constructs +/// a 64-bit integer vector of [8 x i8] as the result. Positive values +/// greater than 0x7F are saturated to 0x7F. Negative values less than 0x80 +/// are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKSSWB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a +/// 16-bit signed integer and is converted to an 8-bit signed integer with +/// saturation. Positive values greater than 0x7F are saturated to 0x7F. +/// Negative values less than 0x80 are saturated to 0x80. The converted +/// [4 x i8] values are written to the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a +/// 16-bit signed integer and is converted to an 8-bit signed integer with +/// saturation. Positive values greater than 0x7F are saturated to 0x7F. +/// Negative values less than 0x80 are saturated to 0x80. The converted +/// [4 x i8] values are written to the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the converted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_packs_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Converts 32-bit signed integers from both 64-bit integer vector +/// parameters of [2 x i32] into 16-bit signed integer values, and constructs +/// a 64-bit integer vector of [4 x i16] as the result. Positive values +/// greater than 0x7FFF are saturated to 0x7FFF. Negative values less than +/// 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKSSDW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a +/// 32-bit signed integer and is converted to a 16-bit signed integer with +/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF. +/// Negative values less than 0x8000 are saturated to 0x8000. The converted +/// [2 x i16] values are written to the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a +/// 32-bit signed integer and is converted to a 16-bit signed integer with +/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF. +/// Negative values less than 0x8000 are saturated to 0x8000. The converted +/// [2 x i16] values are written to the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the converted +/// values. 
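/*
 * Illustrative usage sketch (not part of the vendored header diff): a signed
 * saturating pack from [4 x i16] down to [8 x i8] using the intrinsics
 * declared above. The constant is an assumption chosen to show the
 * saturation behaviour.
 *
 *   // i16 lanes of a (low to high): 0, 127, -128, 256
 *   __m64 a = _mm_cvtsi64_m64(0x0100FF80007F0000LL);
 *   __m64 b = _mm_cvtsi64_m64(0);
 *   __m64 p = _mm_packs_pi16(a, b);
 *   // low four result bytes: 0, 127, -128 (0x80), and 127 (256 saturated)
 *   long long bits = _mm_cvtm64_si64(p);
 *   _mm_empty();   // leave MMX state before any x87 floating-point use
 */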
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_packs_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2); } +/// \brief Converts 16-bit signed integers from both 64-bit integer vector +/// parameters of [4 x i16] into 8-bit unsigned integer values, and +/// constructs a 64-bit integer vector of [8 x i8] as the result. Values +/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated +/// to 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKUSWB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a +/// 16-bit signed integer and is converted to an 8-bit unsigned integer with +/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0 are saturated to 0. The converted [4 x i8] values are written to +/// the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a +/// 16-bit signed integer and is converted to an 8-bit unsigned integer with +/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less +/// than 0 are saturated to 0. The converted [4 x i8] values are written to +/// the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the converted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_packs_pu16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8] +/// and interleaves them into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. \n +/// Bits [39:32] are written to bits [7:0] of the result. \n +/// Bits [47:40] are written to bits [23:16] of the result. \n +/// Bits [55:48] are written to bits [39:32] of the result. \n +/// Bits [63:56] are written to bits [55:48] of the result. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [39:32] are written to bits [15:8] of the result. \n +/// Bits [47:40] are written to bits [31:24] of the result. \n +/// Bits [55:48] are written to bits [47:40] of the result. \n +/// Bits [63:56] are written to bits [63:56] of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2); } +/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of +/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [47:32] are written to bits [15:0] of the result. \n +/// Bits [63:48] are written to bits [47:32] of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [47:32] are written to bits [31:16] of the result. \n +/// Bits [63:48] are written to bits [63:48] of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved +/// values. 
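/*
 * Illustrative usage sketch (not part of the vendored header diff):
 * interleaving the upper four bytes of two [8 x i8] vectors with
 * _mm_unpackhi_pi8. The constants are assumptions for illustration only.
 *
 *   // bytes of a (low to high): 'A','B','C','D','A','B','C','D'
 *   __m64 a = _mm_cvtsi64_m64(0x4443424144434241LL);
 *   // bytes of b (low to high): '1','2','3','4','1','2','3','4'
 *   __m64 b = _mm_cvtsi64_m64(0x3433323134333231LL);
 *   // upper halves are interleaved: result bytes are 'A','1','B','2','C','3','D','4'
 *   __m64 hi = _mm_unpackhi_pi8(a, b);
 *   _mm_empty();
 */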
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of +/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHDQ instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to +/// the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to +/// the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2); } +/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8] +/// and interleaves them into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [7:0] are written to bits [7:0] of the result. \n +/// Bits [15:8] are written to bits [23:16] of the result. \n +/// Bits [23:16] are written to bits [39:32] of the result. \n +/// Bits [31:24] are written to bits [55:48] of the result. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [7:0] are written to bits [15:8] of the result. \n +/// Bits [15:8] are written to bits [31:24] of the result. \n +/// Bits [23:16] are written to bits [47:40] of the result. \n +/// Bits [31:24] are written to bits [63:56] of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2); } +/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of +/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [15:0] are written to bits [15:0] of the result. \n +/// Bits [31:16] are written to bits [47:32] of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [15:0] are written to bits [31:16] of the result. \n +/// Bits [31:16] are written to bits [63:48] of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of +/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLDQ instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to +/// the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to +/// the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved +/// values. 
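/*
 * Illustrative usage sketch (not part of the vendored header diff): a common
 * idiom is to widen unsigned bytes to 16-bit lanes by interleaving with a
 * zero vector via _mm_unpacklo_pi8. The input constant is an assumption for
 * illustration only.
 *
 *   __m64 zero  = _mm_cvtsi64_m64(0);
 *   // low bytes of input (low to high): 0x40, 0x80, 0xC0, 0xFF
 *   __m64 bytes = _mm_cvtsi64_m64(0x00000000FFC08040LL);
 *   // lower four bytes become the i16 lanes 0x0040, 0x0080, 0x00C0, 0x00FF
 *   __m64 words = _mm_unpacklo_pi8(bytes, zero);
 *   _mm_empty();
 */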
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2); } +/// \brief Adds each 8-bit integer element of the first 64-bit integer vector +/// of [8 x i8] to the corresponding 8-bit integer element of the second +/// 64-bit integer vector of [8 x i8]. The lower 8 bits of the results are +/// packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the sums of both +/// parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2); } +/// \brief Adds each 16-bit integer element of the first 64-bit integer vector +/// of [4 x i16] to the corresponding 16-bit integer element of the second +/// 64-bit integer vector of [4 x i16]. The lower 16 bits of the results are +/// packed into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the sums of both +/// parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Adds each 32-bit integer element of the first 64-bit integer vector +/// of [2 x i32] to the corresponding 32-bit integer element of the second +/// 64-bit integer vector of [2 x i32]. The lower 32 bits of the results are +/// packed into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the sums of both +/// parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2); } +/// \brief Adds each 8-bit signed integer element of the first 64-bit integer +/// vector of [8 x i8] to the corresponding 8-bit signed integer element of +/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than +/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to +/// 0x80. The results are packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated sums +/// of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS -_mm_adds_pi8(__m64 __m1, __m64 __m2) +_mm_adds_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2); } +/// \brief Adds each 16-bit signed integer element of the first 64-bit integer +/// vector of [4 x i16] to the corresponding 16-bit signed integer element of +/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than +/// 0x7FFF are saturated to 0x7FFF. 
Negative sums less than 0x8000 are +/// saturated to 0x8000. The results are packed into a 64-bit integer vector +/// of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated sums +/// of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_adds_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2); + return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Adds each 8-bit unsigned integer element of the first 64-bit integer +/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of +/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are +/// saturated to 0xFF. The results are packed into a 64-bit integer vector of +/// [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDUSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// unsigned sums of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS -_mm_adds_pu8(__m64 __m1, __m64 __m2) +_mm_adds_pu8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2); } - + +/// \brief Adds each 16-bit unsigned integer element of the first 64-bit integer +/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element +/// of the second 64-bit integer vector of [4 x i16]. Sums greater than +/// 0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDUSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// unsigned sums of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS -_mm_adds_pu16(__m64 __m1, __m64 __m2) +_mm_adds_pu16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Subtracts each 8-bit integer element of the second 64-bit integer +/// vector of [8 x i8] from the corresponding 8-bit integer element of the +/// first 64-bit integer vector of [8 x i8]. The lower 8 bits of the results +/// are packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the differences of +/// both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2); } - + +/// \brief Subtracts each 16-bit integer element of the second 64-bit integer +/// vector of [4 x i16] from the corresponding 16-bit integer element of the +/// first 64-bit integer vector of [4 x i16]. The lower 16 bits of the +/// results are packed into a 64-bit integer vector of [4 x i16]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the differences of +/// both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2); } - + +/// \brief Subtracts each 32-bit integer element of the second 64-bit integer +/// vector of [2 x i32] from the corresponding 32-bit integer element of the +/// first 64-bit integer vector of [2 x i32]. The lower 32 bits of the +/// results are packed into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32] containing the subtrahends. +/// \returns A 64-bit integer vector of [2 x i32] containing the differences of +/// both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2); } +/// \brief Subtracts each 8-bit signed integer element of the second 64-bit +/// integer vector of [8 x i8] from the corresponding 8-bit signed integer +/// element of the first 64-bit integer vector of [8 x i8]. Positive results +/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80 +/// are saturated to 0x80. The results are packed into a 64-bit integer +/// vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// differences of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_subs_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2); } +/// \brief Subtracts each 16-bit signed integer element of the second 64-bit +/// integer vector of [4 x i16] from the corresponding 16-bit signed integer +/// element of the first 64-bit integer vector of [4 x i16]. Positive results +/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than +/// 0x8000 are saturated to 0x8000. The results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// differences of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_subs_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Subtracts each 8-bit unsigned integer element of the second 64-bit +/// integer vector of [8 x i8] from the corresponding 8-bit unsigned integer +/// element of the first 64-bit integer vector of [8 x i8]. If an element of +/// the first vector is less than the corresponding element of the second +/// vector, the result is saturated to 0. 
The results are packed into a +/// 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBUSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// differences of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_subs_pu8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2); } - + +/// \brief Subtracts each 16-bit unsigned integer element of the second 64-bit +/// integer vector of [4 x i16] from the corresponding 16-bit unsigned +/// integer element of the first 64-bit integer vector of [4 x i16]. If an +/// element of the first vector is less than the corresponding element of the +/// second vector, the result is saturated to 0. The results are packed into +/// a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBUSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// differences of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_subs_pu16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16] and get four +/// 32-bit products. Adds adjacent pairs of products to get two 32-bit sums. +/// The lower 32 bits of these two sums are packed into a 64-bit integer +/// vector of [2 x i32]. For example, bits [15:0] of both parameters are +/// multiplied, bits [31:16] of both parameters are multiplied, and the sum +/// of both results is written to bits [31:0] of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMADDWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [2 x i32] containing the sums of +/// products of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_madd_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16]. Packs the upper +/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULHW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits +/// of the products of both parameters. 
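/* [Editor's example -- not part of the upstream patch.] A hypothetical sketch
 * of the PMADDWD behaviour described above: _mm_madd_pi16 forms four 32-bit
 * products and adds adjacent pairs, the classic building block for 16-bit dot
 * products. Assumes an x86 target with MMX available. */
#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    __m64 a = _mm_set_pi16(4, 3, 2, 1);            /* elements 1,2,3,4, low to high */
    __m64 b = _mm_set_pi16(80, 70, 60, 50);        /* elements 50,60,70,80          */
    __m64 p = _mm_madd_pi16(a, b);                 /* [1*50+2*60, 3*70+4*80]        */
    int out[2];
    memcpy(out, &p, sizeof out);
    _mm_empty();
    printf("%d %d -> dot = %d\n", out[0], out[1], out[0] + out[1]);  /* 170 530 -> 700 */
    return 0;
}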
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_mulhi_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2); } - + +/// \brief Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16]. Packs the lower +/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULLW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits +/// of the products of both parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS -_mm_mullo_pi16(__m64 __m1, __m64 __m2) +_mm_mullo_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Left-shifts each 16-bit signed integer element of the first +/// parameter, which is a 64-bit integer vector of [4 x i16], by the number +/// of bits specified by the second parameter, which is a 64-bit integer. The +/// lower 16 bits of the results are packed into a 64-bit integer vector of +/// [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted +/// values. If \a __count is greater or equal to 16, the result is set to all +/// 0. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sll_pi16(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count); } +/// \brief Left-shifts each 16-bit signed integer element of a 64-bit integer +/// vector of [4 x i16] by the number of bits specified by a 32-bit integer. +/// The lower 16 bits of the results are packed into a 64-bit integer vector +/// of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted +/// values. If \a __count is greater or equal to 16, the result is set to all +/// 0. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_slli_pi16(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count); + return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count); } +/// \brief Left-shifts each 32-bit signed integer element of the first +/// parameter, which is a 64-bit integer vector of [2 x i32], by the number +/// of bits specified by the second parameter, which is a 64-bit integer. The +/// lower 32 bits of the results are packed into a 64-bit integer vector of +/// [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted +/// values. If \a __count is greater or equal to 32, the result is set to all +/// 0. 
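/* [Editor's example -- not part of the upstream patch.] A hypothetical sketch
 * showing how the _mm_mullo_pi16 / _mm_mulhi_pi16 pair documented above can be
 * combined to recover the full 32-bit products of 16-bit inputs (positive
 * values here, to keep the reassembly trivial). Assumes x86 with MMX. */
#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    __m64 a  = _mm_set_pi16(20000, 300, 1000, 7);
    __m64 b  = _mm_set_pi16( 3000, 300, 1000, 6);
    __m64 lo = _mm_mullo_pi16(a, b);               /* low 16 bits of each product  */
    __m64 hi = _mm_mulhi_pi16(a, b);               /* high 16 bits of each product */
    unsigned short l[4];
    short h[4];
    memcpy(l, &lo, sizeof l);
    memcpy(h, &hi, sizeof h);
    _mm_empty();
    for (int i = 0; i < 4; ++i)                    /* 42 1000000 90000 60000000    */
        printf("%d ", ((int)h[i] << 16) | l[i]);
    printf("\n");
    return 0;
}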
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sll_pi32(__m64 __m, __m64 __count) { return (__m64)__builtin_ia32_pslld((__v2si)__m, __count); } +/// \brief Left-shifts each 32-bit signed integer element of a 64-bit integer +/// vector of [2 x i32] by the number of bits specified by a 32-bit integer. +/// The lower 32 bits of the results are packed into a 64-bit integer vector +/// of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted +/// values. If \a __count is greater or equal to 32, the result is set to all +/// 0. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_slli_pi32(__m64 __m, int __count) { return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count); } +/// \brief Left-shifts the first 64-bit integer parameter by the number of bits +/// specified by the second 64-bit integer parameter. The lower 64 bits of +/// result are returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector containing the left-shifted value. If +/// \a __count is greater or equal to 64, the result is set to 0. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sll_si64(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psllq(__m, __count); + return (__m64)__builtin_ia32_psllq((__v1di)__m, __count); } +/// \brief Left-shifts the first parameter, which is a 64-bit integer, by the +/// number of bits specified by the second parameter, which is a 32-bit +/// integer. The lower 64 bits of result are returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector containing the left-shifted value. If +/// \a __count is greater or equal to 64, the result is set to 0. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_slli_si64(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psllqi(__m, __count); + return (__m64)__builtin_ia32_psllqi((__v1di)__m, __count); } +/// \brief Right-shifts each 16-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [4 x i16], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. High-order +/// bits are filled with the sign bit of the initial value of each 16-bit +/// element. The 16-bit results are packed into a 64-bit integer vector of +/// [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sra_pi16(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count); + return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count); } +/// \brief Right-shifts each 16-bit integer element of a 64-bit integer vector +/// of [4 x i16] by the number of bits specified by a 32-bit integer. 
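/* [Editor's example -- not part of the upstream patch.] A hypothetical sketch
 * of the whole-register shift _mm_slli_si64 documented above, which treats the
 * __m64 value as one 64-bit integer rather than packed lanes. _mm_cvtsi32_si64
 * and _mm_cvtsi64_si32 come from earlier in this same header. Assumes x86 with
 * MMX available. */
#include <mmintrin.h>
#include <stdio.h>

int main(void) {
    __m64 v = _mm_cvtsi32_si64(0x1234);            /* zero-extended into a __m64 */
    __m64 s = _mm_slli_si64(v, 16);                /* whole 64-bit value << 16   */
    int lo = _mm_cvtsi64_si32(s);                  /* lower 32 bits              */
    _mm_empty();
    printf("%#x\n", (unsigned)lo);                 /* 0x12340000                 */
    return 0;
}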
+/// High-order bits are filled with the sign bit of the initial value of each +/// 16-bit element. The 16-bit results are packed into a 64-bit integer +/// vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_srai_pi16(__m64 __m, int __count) { return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count); } +/// \brief Right-shifts each 32-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [2 x i32], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. High-order +/// bits are filled with the sign bit of the initial value of each 32-bit +/// element. The 32-bit results are packed into a 64-bit integer vector of +/// [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sra_pi32(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psrad((__v2si)__m, __count); + return (__m64)__builtin_ia32_psrad((__v2si)__m, __count); } +/// \brief Right-shifts each 32-bit integer element of a 64-bit integer vector +/// of [2 x i32] by the number of bits specified by a 32-bit integer. +/// High-order bits are filled with the sign bit of the initial value of each +/// 32-bit element. The 32-bit results are packed into a 64-bit integer +/// vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_srai_pi32(__m64 __m, int __count) { return (__m64)__builtin_ia32_psradi((__v2si)__m, __count); } +/// \brief Right-shifts each 16-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [4 x i16], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. High-order +/// bits are cleared. The 16-bit results are packed into a 64-bit integer +/// vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_srl_pi16(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count); + return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count); } +/// \brief Right-shifts each 16-bit integer element of a 64-bit integer vector +/// of [4 x i16] by the number of bits specified by a 32-bit integer. +/// High-order bits are cleared. The 16-bit results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLW instruction. 
+/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_srli_pi16(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count); + return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count); } +/// \brief Right-shifts each 32-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [2 x i32], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. High-order +/// bits are cleared. The 32-bit results are packed into a 64-bit integer +/// vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_srl_pi32(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psrld((__v2si)__m, __count); + return (__m64)__builtin_ia32_psrld((__v2si)__m, __count); } +/// \brief Right-shifts each 32-bit integer element of a 64-bit integer vector +/// of [2 x i32] by the number of bits specified by a 32-bit integer. +/// High-order bits are cleared. The 32-bit results are packed into a 64-bit +/// integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_srli_pi32(__m64 __m, int __count) { return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count); } +/// \brief Right-shifts the first 64-bit integer parameter by the number of bits +/// specified by the second 64-bit integer parameter. High-order bits are +/// cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector containing the right-shifted value. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_srl_si64(__m64 __m, __m64 __count) { - return (__m64)__builtin_ia32_psrlq(__m, __count); + return (__m64)__builtin_ia32_psrlq((__v1di)__m, __count); } +/// \brief Right-shifts the first parameter, which is a 64-bit integer, by the +/// number of bits specified by the second parameter, which is a 32-bit +/// integer. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector containing the right-shifted value. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_srli_si64(__m64 __m, int __count) { - return (__m64)__builtin_ia32_psrlqi(__m, __count); + return (__m64)__builtin_ia32_psrlqi((__v1di)__m, __count); } +/// \brief Performs a bitwise AND of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAND instruction. 
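/* [Editor's example -- not part of the upstream patch.] A hypothetical sketch
 * contrasting the arithmetic (_mm_srai_pi16 / PSRAW) and logical
 * (_mm_srli_pi16 / PSRLW) right shifts documented above: the former replicates
 * the sign bit, the latter shifts in zeros. Assumes x86 with MMX available. */
#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    __m64 v = _mm_set1_pi16(-8);                   /* 0xFFF8 in every lane */
    __m64 a = _mm_srai_pi16(v, 2);                 /* arithmetic: -2       */
    __m64 l = _mm_srli_pi16(v, 2);                 /* logical: 0x3FFE      */
    short sa[4];
    unsigned short sl[4];
    memcpy(sa, &a, sizeof sa);
    memcpy(sl, &l, sizeof sl);
    _mm_empty();
    printf("srai: %d   srli: 0x%X\n", sa[0], (unsigned)sl[0]);
    return 0;
}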
+/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise AND of both +/// parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_and_si64(__m64 __m1, __m64 __m2) { - return __builtin_ia32_pand(__m1, __m2); + return __builtin_ia32_pand((__v1di)__m1, (__v1di)__m2); } +/// \brief Performs a bitwise NOT of the first 64-bit integer vector, and then +/// performs a bitwise AND of the intermediate result and the second 64-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PANDN instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. The one's complement of this parameter is used +/// in the bitwise AND. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise AND of the second +/// parameter and the one's complement of the first parameter. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_andnot_si64(__m64 __m1, __m64 __m2) { - return __builtin_ia32_pandn(__m1, __m2); + return __builtin_ia32_pandn((__v1di)__m1, (__v1di)__m2); } +/// \brief Performs a bitwise OR of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POR instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise OR of both +/// parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_or_si64(__m64 __m1, __m64 __m2) { - return __builtin_ia32_por(__m1, __m2); + return __builtin_ia32_por((__v1di)__m1, (__v1di)__m2); } +/// \brief Performs a bitwise exclusive OR of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PXOR instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise exclusive OR of both +/// parameters. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_xor_si64(__m64 __m1, __m64 __m2) { - return __builtin_ia32_pxor(__m1, __m2); + return __builtin_ia32_pxor((__v1di)__m1, (__v1di)__m2); } +/// \brief Compares the 8-bit integer elements of two 64-bit integer vectors of +/// [8 x i8] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. The comparison yields 0 for +/// false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the comparison +/// results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2); } +/// \brief Compares the 16-bit integer elements of two 64-bit integer vectors of +/// [4 x i16] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. The comparison yields 0 for +/// false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the comparison +/// results. 
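/* [Editor's example -- not part of the upstream patch.] A hypothetical sketch
 * of the bitwise helpers documented above; note the operand order of
 * _mm_andnot_si64: it computes (~__m1) & __m2, so the first argument is the
 * mask that gets inverted. Assumes x86 with MMX available. */
#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    __m64 v    = _mm_set1_pi8((char)0xAB);
    __m64 mask = _mm_set1_pi8(0x0F);               /* low nibble of every byte */
    __m64 hi   = _mm_andnot_si64(mask, v);         /* (~0x0F) & 0xAB = 0xA0    */
    __m64 lo   = _mm_and_si64(mask, v);            /*   0x0F  & 0xAB = 0x0B    */
    unsigned char h[8], l[8];
    memcpy(h, &hi, sizeof h);
    memcpy(l, &lo, sizeof l);
    _mm_empty();
    printf("0x%02X 0x%02X\n", h[0], l[0]);         /* 0xA0 0x0B */
    return 0;
}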
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Compares the 32-bit integer elements of two 64-bit integer vectors of +/// [2 x i32] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. The comparison yields 0 for +/// false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the comparison +/// results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2); } +/// \brief Compares the 8-bit integer elements of two 64-bit integer vectors of +/// [8 x i8] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. The comparison yields 0 +/// for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the comparison +/// results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2); } +/// \brief Compares the 16-bit integer elements of two 64-bit integer vectors of +/// [4 x i16] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. The comparison yields 0 +/// for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the comparison +/// results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2); } +/// \brief Compares the 32-bit integer elements of two 64-bit integer vectors of +/// [2 x i32] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. The comparison yields 0 +/// for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the comparison +/// results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) { return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2); } +/// \brief Constructs a 64-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the the VXORPS / XORPS instruction. +/// +/// \returns An initialized 64-bit integer vector with all elements set to zero. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_setzero_si64(void) { return (__m64){ 0LL }; } +/// \brief Constructs a 64-bit integer vector initialized with the specified +/// 32-bit integer values. 
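/* [Editor's example -- not part of the upstream patch.] Base MMX has no packed
 * maximum, so a hypothetical per-element signed max (the helper name max_pi16
 * is ours) is built from the comparison intrinsics documented above plus the
 * bitwise ops shown earlier: the all-ones/all-zeros mask from _mm_cmpgt_pi16
 * selects between the two inputs. Assumes x86 with MMX available. */
#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

static __m64 max_pi16(__m64 a, __m64 b) {
    __m64 gt = _mm_cmpgt_pi16(a, b);               /* 0xFFFF where a > b, else 0 */
    return _mm_or_si64(_mm_and_si64(gt, a), _mm_andnot_si64(gt, b));
}

int main(void) {
    __m64 m = max_pi16(_mm_set_pi16(5, -7,  100, 0),
                       _mm_set_pi16(9, -2, -100, 0));
    short out[4];
    memcpy(out, &m, sizeof out);
    _mm_empty();
    printf("%d %d %d %d\n", out[3], out[2], out[1], out[0]);   /* 9 -2 100 0 */
    return 0;
}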
+/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i1 +/// A 32-bit integer value used to initialize the upper 32 bits of the +/// result. +/// \param __i0 +/// A 32-bit integer value used to initialize the lower 32 bits of the +/// result. +/// \returns An initialized 64-bit integer vector. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_set_pi32(int __i1, int __i0) { return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1); } +/// \brief Constructs a 64-bit integer vector initialized with the specified +/// 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __s3 +/// A 16-bit integer value used to initialize bits [63:48] of the result. +/// \param __s2 +/// A 16-bit integer value used to initialize bits [47:32] of the result. +/// \param __s1 +/// A 16-bit integer value used to initialize bits [31:16] of the result. +/// \param __s0 +/// A 16-bit integer value used to initialize bits [15:0] of the result. +/// \returns An initialized 64-bit integer vector. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_set_pi16(short __s3, short __s2, short __s1, short __s0) { return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3); } +/// \brief Constructs a 64-bit integer vector initialized with the specified +/// 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b7 +/// An 8-bit integer value used to initialize bits [63:56] of the result. +/// \param __b6 +/// An 8-bit integer value used to initialize bits [55:48] of the result. +/// \param __b5 +/// An 8-bit integer value used to initialize bits [47:40] of the result. +/// \param __b4 +/// An 8-bit integer value used to initialize bits [39:32] of the result. +/// \param __b3 +/// An 8-bit integer value used to initialize bits [31:24] of the result. +/// \param __b2 +/// An 8-bit integer value used to initialize bits [23:16] of the result. +/// \param __b1 +/// An 8-bit integer value used to initialize bits [15:8] of the result. +/// \param __b0 +/// An 8-bit integer value used to initialize bits [7:0] of the result. +/// \returns An initialized 64-bit integer vector. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) @@ -405,36 +1354,129 @@ _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, __b4, __b5, __b6, __b7); } +/// \brief Constructs a 64-bit integer vector of [2 x i32], with each of the +/// 32-bit integer vector elements set to the specified 32-bit integer +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSHUFD / PSHUFD instruction. +/// +/// \param __i +/// A 32-bit integer value used to initialize each vector element of the +/// result. +/// \returns An initialized 64-bit integer vector of [2 x i32]. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_set1_pi32(int __i) { return _mm_set_pi32(__i, __i); } +/// \brief Constructs a 64-bit integer vector of [4 x i16], with each of the +/// 16-bit integer vector elements set to the specified 16-bit integer +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSHUFLW / PSHUFLW instruction. +/// +/// \param __w +/// A 16-bit integer value used to initialize each vector element of the +/// result. 
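/* [Editor's example -- not part of the upstream patch.] A hypothetical sketch
 * of the argument order documented above: _mm_set_pi16 takes its arguments
 * high-to-low (__s3 lands in bits [63:48], __s0 in bits [15:0]), while the
 * _mm_setr_* constructors documented just below take them low-to-high. Assumes
 * x86 with MMX and the usual little-endian view, so bits [15:0] are the first
 * short in memory. */
#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    __m64 a = _mm_set_pi16(4, 3, 2, 1);            /* bits [15:0] = 1 ... [63:48] = 4 */
    __m64 b = _mm_setr_pi16(1, 2, 3, 4);           /* same value, arguments reversed  */
    short sa[4], sb[4];
    memcpy(sa, &a, sizeof sa);
    memcpy(sb, &b, sizeof sb);
    _mm_empty();
    printf("%d %d %d %d | %d %d %d %d\n",          /* 1 2 3 4 | 1 2 3 4 */
           sa[0], sa[1], sa[2], sa[3], sb[0], sb[1], sb[2], sb[3]);
    return 0;
}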
+/// \returns An initialized 64-bit integer vector of [4 x i16]. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_set1_pi16(short __w) { return _mm_set_pi16(__w, __w, __w, __w); } +/// \brief Constructs a 64-bit integer vector of [8 x i8], with each of the +/// 8-bit integer vector elements set to the specified 8-bit integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLBW + VPSHUFLW / PUNPCKLBW + +/// PSHUFLW instruction. +/// +/// \param __b +/// An 8-bit integer value used to initialize each vector element of the +/// result. +/// \returns An initialized 64-bit integer vector of [8 x i8]. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_set1_pi8(char __b) { return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b); } +/// \brief Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 32-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integer value used to initialize the lower 32 bits of the +/// result. +/// \param __i1 +/// A 32-bit integer value used to initialize the upper 32 bits of the +/// result. +/// \returns An initialized 64-bit integer vector. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_setr_pi32(int __i0, int __i1) { return _mm_set_pi32(__i1, __i0); } +/// \brief Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w0 +/// A 16-bit integer value used to initialize bits [15:0] of the result. +/// \param __w1 +/// A 16-bit integer value used to initialize bits [31:16] of the result. +/// \param __w2 +/// A 16-bit integer value used to initialize bits [47:32] of the result. +/// \param __w3 +/// A 16-bit integer value used to initialize bits [63:48] of the result. +/// \returns An initialized 64-bit integer vector. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) { return _mm_set_pi16(__w3, __w2, __w1, __w0); } +/// \brief Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b0 +/// An 8-bit integer value used to initialize bits [7:0] of the result. +/// \param __b1 +/// An 8-bit integer value used to initialize bits [15:8] of the result. +/// \param __b2 +/// An 8-bit integer value used to initialize bits [23:16] of the result. +/// \param __b3 +/// An 8-bit integer value used to initialize bits [31:24] of the result. +/// \param __b4 +/// An 8-bit integer value used to initialize bits [39:32] of the result. +/// \param __b5 +/// An 8-bit integer value used to initialize bits [47:40] of the result. +/// \param __b6 +/// An 8-bit integer value used to initialize bits [55:48] of the result. +/// \param __b7 +/// An 8-bit integer value used to initialize bits [63:56] of the result. +/// \returns An initialized 64-bit integer vector. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7) @@ -447,7 +1489,9 @@ _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, /* Aliases for compatibility. 
*/ #define _m_empty _mm_empty #define _m_from_int _mm_cvtsi32_si64 +#define _m_from_int64 _mm_cvtsi64_m64 #define _m_to_int _mm_cvtsi64_si32 +#define _m_to_int64 _mm_cvtm64_si64 #define _m_packsswb _mm_packs_pi16 #define _m_packssdw _mm_packs_pi32 #define _m_packuswb _mm_packs_pu16 @@ -501,7 +1545,5 @@ _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, #define _m_pcmpgtw _mm_cmpgt_pi16 #define _m_pcmpgtd _mm_cmpgt_pi32 -#endif /* __MMX__ */ - #endif /* __MMINTRIN_H */ diff --git a/c_headers/module.modulemap b/c_headers/module.modulemap new file mode 100644 index 000000000..11ef2f902 --- /dev/null +++ b/c_headers/module.modulemap @@ -0,0 +1,166 @@ +/*===---- module.modulemap - intrinsics module map -------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + *===-----------------------------------------------------------------------=== + */ + +module _Builtin_intrinsics [system] [extern_c] { + explicit module altivec { + requires altivec + header "altivec.h" + } + + explicit module arm { + requires arm + + explicit module acle { + header "arm_acle.h" + export * + } + + explicit module neon { + requires neon + header "arm_neon.h" + export * + } + } + + explicit module intel { + requires x86 + export * + + header "immintrin.h" + textual header "f16cintrin.h" + textual header "avxintrin.h" + textual header "avx2intrin.h" + textual header "avx512fintrin.h" + textual header "avx512erintrin.h" + textual header "fmaintrin.h" + + header "x86intrin.h" + textual header "bmiintrin.h" + textual header "bmi2intrin.h" + textual header "lzcntintrin.h" + textual header "xopintrin.h" + textual header "fma4intrin.h" + textual header "mwaitxintrin.h" + + explicit module mm_malloc { + requires !freestanding + header "mm_malloc.h" + export * // note: for dependency + } + + explicit module cpuid { + requires gnuinlineasm + header "cpuid.h" + } + + explicit module mmx { + header "mmintrin.h" + } + + explicit module sse { + export mm_malloc + export mmx + export sse2 // note: for hackish dependency + header "xmmintrin.h" + } + + explicit module sse2 { + export sse + header "emmintrin.h" + } + + explicit module sse3 { + export sse2 + header "pmmintrin.h" + } + + explicit module ssse3 { + export sse3 + header "tmmintrin.h" + } + + explicit module sse4_1 { + export ssse3 + header "smmintrin.h" + } + + explicit module sse4_2 { + export sse4_1 + header "nmmintrin.h" + } + + explicit module sse4a { + export sse3 + header "ammintrin.h" + } + + explicit module popcnt { + header "popcntintrin.h" + } + + explicit module mm3dnow { + header "mm3dnow.h" + } + + explicit module aes_pclmul { + header "wmmintrin.h" + export aes + export pclmul + } + + explicit module aes { + header "__wmmintrin_aes.h" + } + + explicit module pclmul { + header "__wmmintrin_pclmul.h" + } + } + + explicit module systemz { + requires systemz + export * + + header "s390intrin.h" + + explicit module htm { + requires htm + header "htmintrin.h" + header "htmxlintrin.h" + } + + explicit module zvector { + requires zvector, vx + header "vecintrin.h" + } + } +} + +module _Builtin_stddef_max_align_t [system] [extern_c] { + header "__stddef_max_align_t.h" +} + +module opencl_c { + requires opencl + header "opencl-c.h" +} diff --git a/c_headers/msa.h b/c_headers/msa.h new file mode 100644 index 000000000..da680f5ca --- /dev/null +++ b/c_headers/msa.h @@ -0,0 +1,583 @@ +/*===---- msa.h - MIPS MSA intrinsics --------------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef _MSA_H +#define _MSA_H 1 + +#if defined(__mips_msa) +typedef signed char v16i8 __attribute__((vector_size(16), aligned(16))); +typedef signed char v16i8_b __attribute__((vector_size(16), aligned(1))); +typedef unsigned char v16u8 __attribute__((vector_size(16), aligned(16))); +typedef unsigned char v16u8_b __attribute__((vector_size(16), aligned(1))); +typedef short v8i16 __attribute__((vector_size(16), aligned(16))); +typedef short v8i16_h __attribute__((vector_size(16), aligned(2))); +typedef unsigned short v8u16 __attribute__((vector_size(16), aligned(16))); +typedef unsigned short v8u16_h __attribute__((vector_size(16), aligned(2))); +typedef int v4i32 __attribute__((vector_size(16), aligned(16))); +typedef int v4i32_w __attribute__((vector_size(16), aligned(4))); +typedef unsigned int v4u32 __attribute__((vector_size(16), aligned(16))); +typedef unsigned int v4u32_w __attribute__((vector_size(16), aligned(4))); +typedef long long v2i64 __attribute__((vector_size(16), aligned(16))); +typedef long long v2i64_d __attribute__((vector_size(16), aligned(8))); +typedef unsigned long long v2u64 __attribute__((vector_size(16), aligned(16))); +typedef unsigned long long v2u64_d __attribute__((vector_size(16), aligned(8))); +typedef float v4f32 __attribute__((vector_size(16), aligned(16))); +typedef float v4f32_w __attribute__((vector_size(16), aligned(4))); +typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); +typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); + +#define __msa_sll_b __builtin_msa_sll_b +#define __msa_sll_h __builtin_msa_sll_h +#define __msa_sll_w __builtin_msa_sll_w +#define __msa_sll_d __builtin_msa_sll_d +#define __msa_slli_b __builtin_msa_slli_b +#define __msa_slli_h __builtin_msa_slli_h +#define __msa_slli_w __builtin_msa_slli_w +#define __msa_slli_d __builtin_msa_slli_d +#define __msa_sra_b __builtin_msa_sra_b +#define __msa_sra_h __builtin_msa_sra_h +#define __msa_sra_w __builtin_msa_sra_w +#define __msa_sra_d __builtin_msa_sra_d +#define __msa_srai_b __builtin_msa_srai_b +#define __msa_srai_h __builtin_msa_srai_h +#define __msa_srai_w __builtin_msa_srai_w +#define __msa_srai_d __builtin_msa_srai_d +#define __msa_srar_b __builtin_msa_srar_b +#define __msa_srar_h __builtin_msa_srar_h +#define __msa_srar_w __builtin_msa_srar_w +#define __msa_srar_d __builtin_msa_srar_d +#define __msa_srari_b __builtin_msa_srari_b +#define __msa_srari_h __builtin_msa_srari_h +#define __msa_srari_w __builtin_msa_srari_w +#define __msa_srari_d __builtin_msa_srari_d +#define __msa_srl_b __builtin_msa_srl_b +#define __msa_srl_h __builtin_msa_srl_h +#define __msa_srl_w __builtin_msa_srl_w +#define __msa_srl_d __builtin_msa_srl_d +#define __msa_srli_b __builtin_msa_srli_b +#define __msa_srli_h __builtin_msa_srli_h +#define __msa_srli_w __builtin_msa_srli_w +#define __msa_srli_d __builtin_msa_srli_d +#define __msa_srlr_b __builtin_msa_srlr_b +#define __msa_srlr_h __builtin_msa_srlr_h +#define __msa_srlr_w __builtin_msa_srlr_w +#define __msa_srlr_d __builtin_msa_srlr_d +#define __msa_srlri_b __builtin_msa_srlri_b +#define __msa_srlri_h __builtin_msa_srlri_h +#define __msa_srlri_w 
__builtin_msa_srlri_w +#define __msa_srlri_d __builtin_msa_srlri_d +#define __msa_bclr_b __builtin_msa_bclr_b +#define __msa_bclr_h __builtin_msa_bclr_h +#define __msa_bclr_w __builtin_msa_bclr_w +#define __msa_bclr_d __builtin_msa_bclr_d +#define __msa_bclri_b __builtin_msa_bclri_b +#define __msa_bclri_h __builtin_msa_bclri_h +#define __msa_bclri_w __builtin_msa_bclri_w +#define __msa_bclri_d __builtin_msa_bclri_d +#define __msa_bset_b __builtin_msa_bset_b +#define __msa_bset_h __builtin_msa_bset_h +#define __msa_bset_w __builtin_msa_bset_w +#define __msa_bset_d __builtin_msa_bset_d +#define __msa_bseti_b __builtin_msa_bseti_b +#define __msa_bseti_h __builtin_msa_bseti_h +#define __msa_bseti_w __builtin_msa_bseti_w +#define __msa_bseti_d __builtin_msa_bseti_d +#define __msa_bneg_b __builtin_msa_bneg_b +#define __msa_bneg_h __builtin_msa_bneg_h +#define __msa_bneg_w __builtin_msa_bneg_w +#define __msa_bneg_d __builtin_msa_bneg_d +#define __msa_bnegi_b __builtin_msa_bnegi_b +#define __msa_bnegi_h __builtin_msa_bnegi_h +#define __msa_bnegi_w __builtin_msa_bnegi_w +#define __msa_bnegi_d __builtin_msa_bnegi_d +#define __msa_binsl_b __builtin_msa_binsl_b +#define __msa_binsl_h __builtin_msa_binsl_h +#define __msa_binsl_w __builtin_msa_binsl_w +#define __msa_binsl_d __builtin_msa_binsl_d +#define __msa_binsli_b __builtin_msa_binsli_b +#define __msa_binsli_h __builtin_msa_binsli_h +#define __msa_binsli_w __builtin_msa_binsli_w +#define __msa_binsli_d __builtin_msa_binsli_d +#define __msa_binsr_b __builtin_msa_binsr_b +#define __msa_binsr_h __builtin_msa_binsr_h +#define __msa_binsr_w __builtin_msa_binsr_w +#define __msa_binsr_d __builtin_msa_binsr_d +#define __msa_binsri_b __builtin_msa_binsri_b +#define __msa_binsri_h __builtin_msa_binsri_h +#define __msa_binsri_w __builtin_msa_binsri_w +#define __msa_binsri_d __builtin_msa_binsri_d +#define __msa_addv_b __builtin_msa_addv_b +#define __msa_addv_h __builtin_msa_addv_h +#define __msa_addv_w __builtin_msa_addv_w +#define __msa_addv_d __builtin_msa_addv_d +#define __msa_addvi_b __builtin_msa_addvi_b +#define __msa_addvi_h __builtin_msa_addvi_h +#define __msa_addvi_w __builtin_msa_addvi_w +#define __msa_addvi_d __builtin_msa_addvi_d +#define __msa_subv_b __builtin_msa_subv_b +#define __msa_subv_h __builtin_msa_subv_h +#define __msa_subv_w __builtin_msa_subv_w +#define __msa_subv_d __builtin_msa_subv_d +#define __msa_subvi_b __builtin_msa_subvi_b +#define __msa_subvi_h __builtin_msa_subvi_h +#define __msa_subvi_w __builtin_msa_subvi_w +#define __msa_subvi_d __builtin_msa_subvi_d +#define __msa_max_s_b __builtin_msa_max_s_b +#define __msa_max_s_h __builtin_msa_max_s_h +#define __msa_max_s_w __builtin_msa_max_s_w +#define __msa_max_s_d __builtin_msa_max_s_d +#define __msa_maxi_s_b __builtin_msa_maxi_s_b +#define __msa_maxi_s_h __builtin_msa_maxi_s_h +#define __msa_maxi_s_w __builtin_msa_maxi_s_w +#define __msa_maxi_s_d __builtin_msa_maxi_s_d +#define __msa_max_u_b __builtin_msa_max_u_b +#define __msa_max_u_h __builtin_msa_max_u_h +#define __msa_max_u_w __builtin_msa_max_u_w +#define __msa_max_u_d __builtin_msa_max_u_d +#define __msa_maxi_u_b __builtin_msa_maxi_u_b +#define __msa_maxi_u_h __builtin_msa_maxi_u_h +#define __msa_maxi_u_w __builtin_msa_maxi_u_w +#define __msa_maxi_u_d __builtin_msa_maxi_u_d +#define __msa_min_s_b __builtin_msa_min_s_b +#define __msa_min_s_h __builtin_msa_min_s_h +#define __msa_min_s_w __builtin_msa_min_s_w +#define __msa_min_s_d __builtin_msa_min_s_d +#define __msa_mini_s_b __builtin_msa_mini_s_b +#define __msa_mini_s_h 
__builtin_msa_mini_s_h +#define __msa_mini_s_w __builtin_msa_mini_s_w +#define __msa_mini_s_d __builtin_msa_mini_s_d +#define __msa_min_u_b __builtin_msa_min_u_b +#define __msa_min_u_h __builtin_msa_min_u_h +#define __msa_min_u_w __builtin_msa_min_u_w +#define __msa_min_u_d __builtin_msa_min_u_d +#define __msa_mini_u_b __builtin_msa_mini_u_b +#define __msa_mini_u_h __builtin_msa_mini_u_h +#define __msa_mini_u_w __builtin_msa_mini_u_w +#define __msa_mini_u_d __builtin_msa_mini_u_d +#define __msa_max_a_b __builtin_msa_max_a_b +#define __msa_max_a_h __builtin_msa_max_a_h +#define __msa_max_a_w __builtin_msa_max_a_w +#define __msa_max_a_d __builtin_msa_max_a_d +#define __msa_min_a_b __builtin_msa_min_a_b +#define __msa_min_a_h __builtin_msa_min_a_h +#define __msa_min_a_w __builtin_msa_min_a_w +#define __msa_min_a_d __builtin_msa_min_a_d +#define __msa_ceq_b __builtin_msa_ceq_b +#define __msa_ceq_h __builtin_msa_ceq_h +#define __msa_ceq_w __builtin_msa_ceq_w +#define __msa_ceq_d __builtin_msa_ceq_d +#define __msa_ceqi_b __builtin_msa_ceqi_b +#define __msa_ceqi_h __builtin_msa_ceqi_h +#define __msa_ceqi_w __builtin_msa_ceqi_w +#define __msa_ceqi_d __builtin_msa_ceqi_d +#define __msa_clt_s_b __builtin_msa_clt_s_b +#define __msa_clt_s_h __builtin_msa_clt_s_h +#define __msa_clt_s_w __builtin_msa_clt_s_w +#define __msa_clt_s_d __builtin_msa_clt_s_d +#define __msa_clti_s_b __builtin_msa_clti_s_b +#define __msa_clti_s_h __builtin_msa_clti_s_h +#define __msa_clti_s_w __builtin_msa_clti_s_w +#define __msa_clti_s_d __builtin_msa_clti_s_d +#define __msa_clt_u_b __builtin_msa_clt_u_b +#define __msa_clt_u_h __builtin_msa_clt_u_h +#define __msa_clt_u_w __builtin_msa_clt_u_w +#define __msa_clt_u_d __builtin_msa_clt_u_d +#define __msa_clti_u_b __builtin_msa_clti_u_b +#define __msa_clti_u_h __builtin_msa_clti_u_h +#define __msa_clti_u_w __builtin_msa_clti_u_w +#define __msa_clti_u_d __builtin_msa_clti_u_d +#define __msa_cle_s_b __builtin_msa_cle_s_b +#define __msa_cle_s_h __builtin_msa_cle_s_h +#define __msa_cle_s_w __builtin_msa_cle_s_w +#define __msa_cle_s_d __builtin_msa_cle_s_d +#define __msa_clei_s_b __builtin_msa_clei_s_b +#define __msa_clei_s_h __builtin_msa_clei_s_h +#define __msa_clei_s_w __builtin_msa_clei_s_w +#define __msa_clei_s_d __builtin_msa_clei_s_d +#define __msa_cle_u_b __builtin_msa_cle_u_b +#define __msa_cle_u_h __builtin_msa_cle_u_h +#define __msa_cle_u_w __builtin_msa_cle_u_w +#define __msa_cle_u_d __builtin_msa_cle_u_d +#define __msa_clei_u_b __builtin_msa_clei_u_b +#define __msa_clei_u_h __builtin_msa_clei_u_h +#define __msa_clei_u_w __builtin_msa_clei_u_w +#define __msa_clei_u_d __builtin_msa_clei_u_d +#define __msa_ld_b __builtin_msa_ld_b +#define __msa_ld_h __builtin_msa_ld_h +#define __msa_ld_w __builtin_msa_ld_w +#define __msa_ld_d __builtin_msa_ld_d +#define __msa_st_b __builtin_msa_st_b +#define __msa_st_h __builtin_msa_st_h +#define __msa_st_w __builtin_msa_st_w +#define __msa_st_d __builtin_msa_st_d +#define __msa_sat_s_b __builtin_msa_sat_s_b +#define __msa_sat_s_h __builtin_msa_sat_s_h +#define __msa_sat_s_w __builtin_msa_sat_s_w +#define __msa_sat_s_d __builtin_msa_sat_s_d +#define __msa_sat_u_b __builtin_msa_sat_u_b +#define __msa_sat_u_h __builtin_msa_sat_u_h +#define __msa_sat_u_w __builtin_msa_sat_u_w +#define __msa_sat_u_d __builtin_msa_sat_u_d +#define __msa_add_a_b __builtin_msa_add_a_b +#define __msa_add_a_h __builtin_msa_add_a_h +#define __msa_add_a_w __builtin_msa_add_a_w +#define __msa_add_a_d __builtin_msa_add_a_d +#define __msa_adds_a_b __builtin_msa_adds_a_b 
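/* [Editor's example -- not part of the upstream patch.] A hypothetical sketch
 * of how the __msa_* macro wrappers above forward to the __builtin_msa_*
 * builtins. It only compiles for a MIPS target with MSA enabled (for instance
 * clang --target=mips-linux-gnu -mmsa -mfp64); the vector types such as v4i32
 * are the typedefs from this msa.h, and the helper name add_arrays_msa is ours.
 * n is assumed to be a multiple of 4 for brevity. */
#include <msa.h>

void add_arrays_msa(int *dst, int *a, int *b, int n) {
    for (int i = 0; i < n; i += 4) {
        v4i32 va = __msa_ld_w(a + i, 0);           /* vector load, byte offset 0 */
        v4i32 vb = __msa_ld_w(b + i, 0);
        v4i32 vs = __msa_addv_w(va, vb);           /* element-wise 32-bit add    */
        __msa_st_w(vs, dst + i, 0);                /* vector store               */
    }
}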
+#define __msa_adds_a_h __builtin_msa_adds_a_h +#define __msa_adds_a_w __builtin_msa_adds_a_w +#define __msa_adds_a_d __builtin_msa_adds_a_d +#define __msa_adds_s_b __builtin_msa_adds_s_b +#define __msa_adds_s_h __builtin_msa_adds_s_h +#define __msa_adds_s_w __builtin_msa_adds_s_w +#define __msa_adds_s_d __builtin_msa_adds_s_d +#define __msa_adds_u_b __builtin_msa_adds_u_b +#define __msa_adds_u_h __builtin_msa_adds_u_h +#define __msa_adds_u_w __builtin_msa_adds_u_w +#define __msa_adds_u_d __builtin_msa_adds_u_d +#define __msa_ave_s_b __builtin_msa_ave_s_b +#define __msa_ave_s_h __builtin_msa_ave_s_h +#define __msa_ave_s_w __builtin_msa_ave_s_w +#define __msa_ave_s_d __builtin_msa_ave_s_d +#define __msa_ave_u_b __builtin_msa_ave_u_b +#define __msa_ave_u_h __builtin_msa_ave_u_h +#define __msa_ave_u_w __builtin_msa_ave_u_w +#define __msa_ave_u_d __builtin_msa_ave_u_d +#define __msa_aver_s_b __builtin_msa_aver_s_b +#define __msa_aver_s_h __builtin_msa_aver_s_h +#define __msa_aver_s_w __builtin_msa_aver_s_w +#define __msa_aver_s_d __builtin_msa_aver_s_d +#define __msa_aver_u_b __builtin_msa_aver_u_b +#define __msa_aver_u_h __builtin_msa_aver_u_h +#define __msa_aver_u_w __builtin_msa_aver_u_w +#define __msa_aver_u_d __builtin_msa_aver_u_d +#define __msa_subs_s_b __builtin_msa_subs_s_b +#define __msa_subs_s_h __builtin_msa_subs_s_h +#define __msa_subs_s_w __builtin_msa_subs_s_w +#define __msa_subs_s_d __builtin_msa_subs_s_d +#define __msa_subs_u_b __builtin_msa_subs_u_b +#define __msa_subs_u_h __builtin_msa_subs_u_h +#define __msa_subs_u_w __builtin_msa_subs_u_w +#define __msa_subs_u_d __builtin_msa_subs_u_d +#define __msa_subsuu_s_b __builtin_msa_subsuu_s_b +#define __msa_subsuu_s_h __builtin_msa_subsuu_s_h +#define __msa_subsuu_s_w __builtin_msa_subsuu_s_w +#define __msa_subsuu_s_d __builtin_msa_subsuu_s_d +#define __msa_subsus_u_b __builtin_msa_subsus_u_b +#define __msa_subsus_u_h __builtin_msa_subsus_u_h +#define __msa_subsus_u_w __builtin_msa_subsus_u_w +#define __msa_subsus_u_d __builtin_msa_subsus_u_d +#define __msa_asub_s_b __builtin_msa_asub_s_b +#define __msa_asub_s_h __builtin_msa_asub_s_h +#define __msa_asub_s_w __builtin_msa_asub_s_w +#define __msa_asub_s_d __builtin_msa_asub_s_d +#define __msa_asub_u_b __builtin_msa_asub_u_b +#define __msa_asub_u_h __builtin_msa_asub_u_h +#define __msa_asub_u_w __builtin_msa_asub_u_w +#define __msa_asub_u_d __builtin_msa_asub_u_d +#define __msa_mulv_b __builtin_msa_mulv_b +#define __msa_mulv_h __builtin_msa_mulv_h +#define __msa_mulv_w __builtin_msa_mulv_w +#define __msa_mulv_d __builtin_msa_mulv_d +#define __msa_maddv_b __builtin_msa_maddv_b +#define __msa_maddv_h __builtin_msa_maddv_h +#define __msa_maddv_w __builtin_msa_maddv_w +#define __msa_maddv_d __builtin_msa_maddv_d +#define __msa_msubv_b __builtin_msa_msubv_b +#define __msa_msubv_h __builtin_msa_msubv_h +#define __msa_msubv_w __builtin_msa_msubv_w +#define __msa_msubv_d __builtin_msa_msubv_d +#define __msa_div_s_b __builtin_msa_div_s_b +#define __msa_div_s_h __builtin_msa_div_s_h +#define __msa_div_s_w __builtin_msa_div_s_w +#define __msa_div_s_d __builtin_msa_div_s_d +#define __msa_div_u_b __builtin_msa_div_u_b +#define __msa_div_u_h __builtin_msa_div_u_h +#define __msa_div_u_w __builtin_msa_div_u_w +#define __msa_div_u_d __builtin_msa_div_u_d +#define __msa_hadd_s_h __builtin_msa_hadd_s_h +#define __msa_hadd_s_w __builtin_msa_hadd_s_w +#define __msa_hadd_s_d __builtin_msa_hadd_s_d +#define __msa_hadd_u_h __builtin_msa_hadd_u_h +#define __msa_hadd_u_w __builtin_msa_hadd_u_w +#define 
__msa_hadd_u_d __builtin_msa_hadd_u_d +#define __msa_hsub_s_h __builtin_msa_hsub_s_h +#define __msa_hsub_s_w __builtin_msa_hsub_s_w +#define __msa_hsub_s_d __builtin_msa_hsub_s_d +#define __msa_hsub_u_h __builtin_msa_hsub_u_h +#define __msa_hsub_u_w __builtin_msa_hsub_u_w +#define __msa_hsub_u_d __builtin_msa_hsub_u_d +#define __msa_mod_s_b __builtin_msa_mod_s_b +#define __msa_mod_s_h __builtin_msa_mod_s_h +#define __msa_mod_s_w __builtin_msa_mod_s_w +#define __msa_mod_s_d __builtin_msa_mod_s_d +#define __msa_mod_u_b __builtin_msa_mod_u_b +#define __msa_mod_u_h __builtin_msa_mod_u_h +#define __msa_mod_u_w __builtin_msa_mod_u_w +#define __msa_mod_u_d __builtin_msa_mod_u_d +#define __msa_dotp_s_h __builtin_msa_dotp_s_h +#define __msa_dotp_s_w __builtin_msa_dotp_s_w +#define __msa_dotp_s_d __builtin_msa_dotp_s_d +#define __msa_dotp_u_h __builtin_msa_dotp_u_h +#define __msa_dotp_u_w __builtin_msa_dotp_u_w +#define __msa_dotp_u_d __builtin_msa_dotp_u_d +#define __msa_dpadd_s_h __builtin_msa_dpadd_s_h +#define __msa_dpadd_s_w __builtin_msa_dpadd_s_w +#define __msa_dpadd_s_d __builtin_msa_dpadd_s_d +#define __msa_dpadd_u_h __builtin_msa_dpadd_u_h +#define __msa_dpadd_u_w __builtin_msa_dpadd_u_w +#define __msa_dpadd_u_d __builtin_msa_dpadd_u_d +#define __msa_dpsub_s_h __builtin_msa_dpsub_s_h +#define __msa_dpsub_s_w __builtin_msa_dpsub_s_w +#define __msa_dpsub_s_d __builtin_msa_dpsub_s_d +#define __msa_dpsub_u_h __builtin_msa_dpsub_u_h +#define __msa_dpsub_u_w __builtin_msa_dpsub_u_w +#define __msa_dpsub_u_d __builtin_msa_dpsub_u_d +#define __msa_sld_b __builtin_msa_sld_b +#define __msa_sld_h __builtin_msa_sld_h +#define __msa_sld_w __builtin_msa_sld_w +#define __msa_sld_d __builtin_msa_sld_d +#define __msa_sldi_b __builtin_msa_sldi_b +#define __msa_sldi_h __builtin_msa_sldi_h +#define __msa_sldi_w __builtin_msa_sldi_w +#define __msa_sldi_d __builtin_msa_sldi_d +#define __msa_splat_b __builtin_msa_splat_b +#define __msa_splat_h __builtin_msa_splat_h +#define __msa_splat_w __builtin_msa_splat_w +#define __msa_splat_d __builtin_msa_splat_d +#define __msa_splati_b __builtin_msa_splati_b +#define __msa_splati_h __builtin_msa_splati_h +#define __msa_splati_w __builtin_msa_splati_w +#define __msa_splati_d __builtin_msa_splati_d +#define __msa_pckev_b __builtin_msa_pckev_b +#define __msa_pckev_h __builtin_msa_pckev_h +#define __msa_pckev_w __builtin_msa_pckev_w +#define __msa_pckev_d __builtin_msa_pckev_d +#define __msa_pckod_b __builtin_msa_pckod_b +#define __msa_pckod_h __builtin_msa_pckod_h +#define __msa_pckod_w __builtin_msa_pckod_w +#define __msa_pckod_d __builtin_msa_pckod_d +#define __msa_ilvl_b __builtin_msa_ilvl_b +#define __msa_ilvl_h __builtin_msa_ilvl_h +#define __msa_ilvl_w __builtin_msa_ilvl_w +#define __msa_ilvl_d __builtin_msa_ilvl_d +#define __msa_ilvr_b __builtin_msa_ilvr_b +#define __msa_ilvr_h __builtin_msa_ilvr_h +#define __msa_ilvr_w __builtin_msa_ilvr_w +#define __msa_ilvr_d __builtin_msa_ilvr_d +#define __msa_ilvev_b __builtin_msa_ilvev_b +#define __msa_ilvev_h __builtin_msa_ilvev_h +#define __msa_ilvev_w __builtin_msa_ilvev_w +#define __msa_ilvev_d __builtin_msa_ilvev_d +#define __msa_ilvod_b __builtin_msa_ilvod_b +#define __msa_ilvod_h __builtin_msa_ilvod_h +#define __msa_ilvod_w __builtin_msa_ilvod_w +#define __msa_ilvod_d __builtin_msa_ilvod_d +#define __msa_vshf_b __builtin_msa_vshf_b +#define __msa_vshf_h __builtin_msa_vshf_h +#define __msa_vshf_w __builtin_msa_vshf_w +#define __msa_vshf_d __builtin_msa_vshf_d +#define __msa_and_v __builtin_msa_and_v +#define __msa_andi_b 
__builtin_msa_andi_b +#define __msa_or_v __builtin_msa_or_v +#define __msa_ori_b __builtin_msa_ori_b +#define __msa_nor_v __builtin_msa_nor_v +#define __msa_nori_b __builtin_msa_nori_b +#define __msa_xor_v __builtin_msa_xor_v +#define __msa_xori_b __builtin_msa_xori_b +#define __msa_bmnz_v __builtin_msa_bmnz_v +#define __msa_bmnzi_b __builtin_msa_bmnzi_b +#define __msa_bmz_v __builtin_msa_bmz_v +#define __msa_bmzi_b __builtin_msa_bmzi_b +#define __msa_bsel_v __builtin_msa_bsel_v +#define __msa_bseli_b __builtin_msa_bseli_b +#define __msa_shf_b __builtin_msa_shf_b +#define __msa_shf_h __builtin_msa_shf_h +#define __msa_shf_w __builtin_msa_shf_w +#define __msa_test_bnz_v __builtin_msa_bnz_v +#define __msa_test_bz_v __builtin_msa_bz_v +#define __msa_fill_b __builtin_msa_fill_b +#define __msa_fill_h __builtin_msa_fill_h +#define __msa_fill_w __builtin_msa_fill_w +#define __msa_fill_d __builtin_msa_fill_d +#define __msa_pcnt_b __builtin_msa_pcnt_b +#define __msa_pcnt_h __builtin_msa_pcnt_h +#define __msa_pcnt_w __builtin_msa_pcnt_w +#define __msa_pcnt_d __builtin_msa_pcnt_d +#define __msa_nloc_b __builtin_msa_nloc_b +#define __msa_nloc_h __builtin_msa_nloc_h +#define __msa_nloc_w __builtin_msa_nloc_w +#define __msa_nloc_d __builtin_msa_nloc_d +#define __msa_nlzc_b __builtin_msa_nlzc_b +#define __msa_nlzc_h __builtin_msa_nlzc_h +#define __msa_nlzc_w __builtin_msa_nlzc_w +#define __msa_nlzc_d __builtin_msa_nlzc_d +#define __msa_copy_s_b __builtin_msa_copy_s_b +#define __msa_copy_s_h __builtin_msa_copy_s_h +#define __msa_copy_s_w __builtin_msa_copy_s_w +#define __msa_copy_s_d __builtin_msa_copy_s_d +#define __msa_copy_u_b __builtin_msa_copy_u_b +#define __msa_copy_u_h __builtin_msa_copy_u_h +#define __msa_copy_u_w __builtin_msa_copy_u_w +#define __msa_copy_u_d __builtin_msa_copy_u_d +#define __msa_insert_b __builtin_msa_insert_b +#define __msa_insert_h __builtin_msa_insert_h +#define __msa_insert_w __builtin_msa_insert_w +#define __msa_insert_d __builtin_msa_insert_d +#define __msa_insve_b __builtin_msa_insve_b +#define __msa_insve_h __builtin_msa_insve_h +#define __msa_insve_w __builtin_msa_insve_w +#define __msa_insve_d __builtin_msa_insve_d +#define __msa_test_bnz_b __builtin_msa_bnz_b +#define __msa_test_bnz_h __builtin_msa_bnz_h +#define __msa_test_bnz_w __builtin_msa_bnz_w +#define __msa_test_bnz_d __builtin_msa_bnz_d +#define __msa_test_bz_b __builtin_msa_bz_b +#define __msa_test_bz_h __builtin_msa_bz_h +#define __msa_test_bz_w __builtin_msa_bz_w +#define __msa_test_bz_d __builtin_msa_bz_d +#define __msa_ldi_b __builtin_msa_ldi_b +#define __msa_ldi_h __builtin_msa_ldi_h +#define __msa_ldi_w __builtin_msa_ldi_w +#define __msa_ldi_d __builtin_msa_ldi_d +#define __msa_fcaf_w __builtin_msa_fcaf_w +#define __msa_fcaf_d __builtin_msa_fcaf_d +#define __msa_fcor_w __builtin_msa_fcor_w +#define __msa_fcor_d __builtin_msa_fcor_d +#define __msa_fcun_w __builtin_msa_fcun_w +#define __msa_fcun_d __builtin_msa_fcun_d +#define __msa_fcune_w __builtin_msa_fcune_w +#define __msa_fcune_d __builtin_msa_fcune_d +#define __msa_fcueq_w __builtin_msa_fcueq_w +#define __msa_fcueq_d __builtin_msa_fcueq_d +#define __msa_fceq_w __builtin_msa_fceq_w +#define __msa_fceq_d __builtin_msa_fceq_d +#define __msa_fcne_w __builtin_msa_fcne_w +#define __msa_fcne_d __builtin_msa_fcne_d +#define __msa_fclt_w __builtin_msa_fclt_w +#define __msa_fclt_d __builtin_msa_fclt_d +#define __msa_fcult_w __builtin_msa_fcult_w +#define __msa_fcult_d __builtin_msa_fcult_d +#define __msa_fcle_w __builtin_msa_fcle_w +#define __msa_fcle_d 
__builtin_msa_fcle_d +#define __msa_fcule_w __builtin_msa_fcule_w +#define __msa_fcule_d __builtin_msa_fcule_d +#define __msa_fsaf_w __builtin_msa_fsaf_w +#define __msa_fsaf_d __builtin_msa_fsaf_d +#define __msa_fsor_w __builtin_msa_fsor_w +#define __msa_fsor_d __builtin_msa_fsor_d +#define __msa_fsun_w __builtin_msa_fsun_w +#define __msa_fsun_d __builtin_msa_fsun_d +#define __msa_fsune_w __builtin_msa_fsune_w +#define __msa_fsune_d __builtin_msa_fsune_d +#define __msa_fsueq_w __builtin_msa_fsueq_w +#define __msa_fsueq_d __builtin_msa_fsueq_d +#define __msa_fseq_w __builtin_msa_fseq_w +#define __msa_fseq_d __builtin_msa_fseq_d +#define __msa_fsne_w __builtin_msa_fsne_w +#define __msa_fsne_d __builtin_msa_fsne_d +#define __msa_fslt_w __builtin_msa_fslt_w +#define __msa_fslt_d __builtin_msa_fslt_d +#define __msa_fsult_w __builtin_msa_fsult_w +#define __msa_fsult_d __builtin_msa_fsult_d +#define __msa_fsle_w __builtin_msa_fsle_w +#define __msa_fsle_d __builtin_msa_fsle_d +#define __msa_fsule_w __builtin_msa_fsule_w +#define __msa_fsule_d __builtin_msa_fsule_d +#define __msa_fadd_w __builtin_msa_fadd_w +#define __msa_fadd_d __builtin_msa_fadd_d +#define __msa_fsub_w __builtin_msa_fsub_w +#define __msa_fsub_d __builtin_msa_fsub_d +#define __msa_fmul_w __builtin_msa_fmul_w +#define __msa_fmul_d __builtin_msa_fmul_d +#define __msa_fdiv_w __builtin_msa_fdiv_w +#define __msa_fdiv_d __builtin_msa_fdiv_d +#define __msa_fmadd_w __builtin_msa_fmadd_w +#define __msa_fmadd_d __builtin_msa_fmadd_d +#define __msa_fmsub_w __builtin_msa_fmsub_w +#define __msa_fmsub_d __builtin_msa_fmsub_d +#define __msa_fexp2_w __builtin_msa_fexp2_w +#define __msa_fexp2_d __builtin_msa_fexp2_d +#define __msa_fexdo_h __builtin_msa_fexdo_h +#define __msa_fexdo_w __builtin_msa_fexdo_w +#define __msa_ftq_h __builtin_msa_ftq_h +#define __msa_ftq_w __builtin_msa_ftq_w +#define __msa_fmin_w __builtin_msa_fmin_w +#define __msa_fmin_d __builtin_msa_fmin_d +#define __msa_fmin_a_w __builtin_msa_fmin_a_w +#define __msa_fmin_a_d __builtin_msa_fmin_a_d +#define __msa_fmax_w __builtin_msa_fmax_w +#define __msa_fmax_d __builtin_msa_fmax_d +#define __msa_fmax_a_w __builtin_msa_fmax_a_w +#define __msa_fmax_a_d __builtin_msa_fmax_a_d +#define __msa_mul_q_h __builtin_msa_mul_q_h +#define __msa_mul_q_w __builtin_msa_mul_q_w +#define __msa_mulr_q_h __builtin_msa_mulr_q_h +#define __msa_mulr_q_w __builtin_msa_mulr_q_w +#define __msa_madd_q_h __builtin_msa_madd_q_h +#define __msa_madd_q_w __builtin_msa_madd_q_w +#define __msa_maddr_q_h __builtin_msa_maddr_q_h +#define __msa_maddr_q_w __builtin_msa_maddr_q_w +#define __msa_msub_q_h __builtin_msa_msub_q_h +#define __msa_msub_q_w __builtin_msa_msub_q_w +#define __msa_msubr_q_h __builtin_msa_msubr_q_h +#define __msa_msubr_q_w __builtin_msa_msubr_q_w +#define __msa_fclass_w __builtin_msa_fclass_w +#define __msa_fclass_d __builtin_msa_fclass_d +#define __msa_fsqrt_w __builtin_msa_fsqrt_w +#define __msa_fsqrt_d __builtin_msa_fsqrt_d +#define __msa_frcp_w __builtin_msa_frcp_w +#define __msa_frcp_d __builtin_msa_frcp_d +#define __msa_frint_w __builtin_msa_frint_w +#define __msa_frint_d __builtin_msa_frint_d +#define __msa_frsqrt_w __builtin_msa_frsqrt_w +#define __msa_frsqrt_d __builtin_msa_frsqrt_d +#define __msa_flog2_w __builtin_msa_flog2_w +#define __msa_flog2_d __builtin_msa_flog2_d +#define __msa_fexupl_w __builtin_msa_fexupl_w +#define __msa_fexupl_d __builtin_msa_fexupl_d +#define __msa_fexupr_w __builtin_msa_fexupr_w +#define __msa_fexupr_d __builtin_msa_fexupr_d +#define __msa_ffql_w 
__builtin_msa_ffql_w +#define __msa_ffql_d __builtin_msa_ffql_d +#define __msa_ffqr_w __builtin_msa_ffqr_w +#define __msa_ffqr_d __builtin_msa_ffqr_d +#define __msa_ftint_s_w __builtin_msa_ftint_s_w +#define __msa_ftint_s_d __builtin_msa_ftint_s_d +#define __msa_ftint_u_w __builtin_msa_ftint_u_w +#define __msa_ftint_u_d __builtin_msa_ftint_u_d +#define __msa_ftrunc_s_w __builtin_msa_ftrunc_s_w +#define __msa_ftrunc_s_d __builtin_msa_ftrunc_s_d +#define __msa_ftrunc_u_w __builtin_msa_ftrunc_u_w +#define __msa_ftrunc_u_d __builtin_msa_ftrunc_u_d +#define __msa_ffint_s_w __builtin_msa_ffint_s_w +#define __msa_ffint_s_d __builtin_msa_ffint_s_d +#define __msa_ffint_u_w __builtin_msa_ffint_u_w +#define __msa_ffint_u_d __builtin_msa_ffint_u_d +#define __msa_cfcmsa __builtin_msa_cfcmsa +#define __msa_move_v __builtin_msa_move_v +#define __msa_cast_to_vector_float __builtin_msa_cast_to_vector_float +#define __msa_cast_to_vector_double __builtin_msa_cast_to_vector_double +#define __msa_cast_to_scalar_float __builtin_msa_cast_to_scalar_float +#define __msa_cast_to_scalar_double __builtin_msa_cast_to_scalar_double +#endif /* defined(__mips_msa) */ +#endif /* _MSA_H */ diff --git a/c_headers/mwaitxintrin.h b/c_headers/mwaitxintrin.h new file mode 100644 index 000000000..635f2ac6c --- /dev/null +++ b/c_headers/mwaitxintrin.h @@ -0,0 +1,47 @@ +/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#error "Never use <mwaitxintrin.h> directly; include <x86intrin.h> instead." +#endif + +#ifndef _MWAITXINTRIN_H +#define _MWAITXINTRIN_H + +/* Define the default attributes for the functions in this file.
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx"))) +static __inline__ void __DEFAULT_FN_ATTRS +_mm_monitorx(void const * __p, unsigned __extensions, unsigned __hints) +{ + __builtin_ia32_monitorx((void *)__p, __extensions, __hints); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock) +{ + __builtin_ia32_mwaitx(__extensions, __hints, __clock); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* _MWAITXINTRIN_H */ diff --git a/c_headers/nmmintrin.h b/c_headers/nmmintrin.h index f12622d7b..57fec1596 100644 --- a/c_headers/nmmintrin.h +++ b/c_headers/nmmintrin.h @@ -24,12 +24,7 @@ #ifndef _NMMINTRIN_H #define _NMMINTRIN_H -#ifndef __SSE4_2__ -#error "SSE4.2 instruction set not enabled" -#else - /* To match expectations of gcc we put the sse4.2 definitions into smmintrin.h, just include it now then. */ #include <smmintrin.h> -#endif /* __SSE4_2__ */ #endif /* _NMMINTRIN_H */ diff --git a/c_headers/opencl-c.h b/c_headers/opencl-c.h new file mode 100644 index 000000000..0c25d3127 --- /dev/null +++ b/c_headers/opencl-c.h @@ -0,0 +1,17055 @@ +//===--- opencl-c.h - OpenCL C language builtin function header -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef _OPENCL_H_ +#define _OPENCL_H_ + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +#ifndef cl_khr_depth_images +#define cl_khr_depth_images +#endif //cl_khr_depth_images +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +#define __ovld __attribute__((overloadable)) +#define __conv __attribute__((convergent)) + +// Optimizations +#define __purefn __attribute__((pure)) +#define __cnfn __attribute__((const)) + +// built-in scalar data types: + +/** + * An unsigned 8-bit integer. + */ +typedef unsigned char uchar; + +/** + * An unsigned 16-bit integer. + */ +typedef unsigned short ushort; + +/** + * An unsigned 32-bit integer. + */ +typedef unsigned int uint; + +/** + * An unsigned 64-bit integer. + */ +typedef unsigned long ulong; + +/** + * The unsigned integer type of the result of the sizeof operator. This + * is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS + * defined in table 4.3 is 32-bits and is a 64-bit unsigned integer if + * CL_DEVICE_ADDRESS_BITS is 64-bits. + */ +typedef __SIZE_TYPE__ size_t; + +/** + * A signed integer type that is the result of subtracting two pointers. + * This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS + * defined in table 4.3 is 32-bits and is a 64-bit signed integer if + * CL_DEVICE_ADDRESS_BITS is 64-bits. + */ +typedef __PTRDIFF_TYPE__ ptrdiff_t; + +/** +* A signed integer type with the property that any valid pointer to +* void can be converted to this type, then converted back to pointer +* to void, and the result will compare equal to the original pointer. +*/ +typedef __INTPTR_TYPE__ intptr_t; + +/** +* An unsigned integer type with the property that any valid pointer to +* void can be converted to this type, then converted back to pointer +* to void, and the result will compare equal to the original pointer.
+*/ +typedef __UINTPTR_TYPE__ uintptr_t; + +// built-in vector data types: +typedef char char2 __attribute__((ext_vector_type(2))); +typedef char char3 __attribute__((ext_vector_type(3))); +typedef char char4 __attribute__((ext_vector_type(4))); +typedef char char8 __attribute__((ext_vector_type(8))); +typedef char char16 __attribute__((ext_vector_type(16))); +typedef uchar uchar2 __attribute__((ext_vector_type(2))); +typedef uchar uchar3 __attribute__((ext_vector_type(3))); +typedef uchar uchar4 __attribute__((ext_vector_type(4))); +typedef uchar uchar8 __attribute__((ext_vector_type(8))); +typedef uchar uchar16 __attribute__((ext_vector_type(16))); +typedef short short2 __attribute__((ext_vector_type(2))); +typedef short short3 __attribute__((ext_vector_type(3))); +typedef short short4 __attribute__((ext_vector_type(4))); +typedef short short8 __attribute__((ext_vector_type(8))); +typedef short short16 __attribute__((ext_vector_type(16))); +typedef ushort ushort2 __attribute__((ext_vector_type(2))); +typedef ushort ushort3 __attribute__((ext_vector_type(3))); +typedef ushort ushort4 __attribute__((ext_vector_type(4))); +typedef ushort ushort8 __attribute__((ext_vector_type(8))); +typedef ushort ushort16 __attribute__((ext_vector_type(16))); +typedef int int2 __attribute__((ext_vector_type(2))); +typedef int int3 __attribute__((ext_vector_type(3))); +typedef int int4 __attribute__((ext_vector_type(4))); +typedef int int8 __attribute__((ext_vector_type(8))); +typedef int int16 __attribute__((ext_vector_type(16))); +typedef uint uint2 __attribute__((ext_vector_type(2))); +typedef uint uint3 __attribute__((ext_vector_type(3))); +typedef uint uint4 __attribute__((ext_vector_type(4))); +typedef uint uint8 __attribute__((ext_vector_type(8))); +typedef uint uint16 __attribute__((ext_vector_type(16))); +typedef long long2 __attribute__((ext_vector_type(2))); +typedef long long3 __attribute__((ext_vector_type(3))); +typedef long long4 __attribute__((ext_vector_type(4))); +typedef long long8 __attribute__((ext_vector_type(8))); +typedef long long16 __attribute__((ext_vector_type(16))); +typedef ulong ulong2 __attribute__((ext_vector_type(2))); +typedef ulong ulong3 __attribute__((ext_vector_type(3))); +typedef ulong ulong4 __attribute__((ext_vector_type(4))); +typedef ulong ulong8 __attribute__((ext_vector_type(8))); +typedef ulong ulong16 __attribute__((ext_vector_type(16))); +typedef float float2 __attribute__((ext_vector_type(2))); +typedef float float3 __attribute__((ext_vector_type(3))); +typedef float float4 __attribute__((ext_vector_type(4))); +typedef float float8 __attribute__((ext_vector_type(8))); +typedef float float16 __attribute__((ext_vector_type(16))); +#ifdef cl_khr_fp16 +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +typedef half half2 __attribute__((ext_vector_type(2))); +typedef half half3 __attribute__((ext_vector_type(3))); +typedef half half4 __attribute__((ext_vector_type(4))); +typedef half half8 __attribute__((ext_vector_type(8))); +typedef half half16 __attribute__((ext_vector_type(16))); +#endif +#ifdef cl_khr_fp64 +#if __OPENCL_C_VERSION__ < CL_VERSION_1_2 +#pragma OPENCL EXTENSION cl_khr_fp64 : enable +#endif +typedef double double2 __attribute__((ext_vector_type(2))); +typedef double double3 __attribute__((ext_vector_type(3))); +typedef double double4 __attribute__((ext_vector_type(4))); +typedef double double8 __attribute__((ext_vector_type(8))); +typedef double double16 __attribute__((ext_vector_type(16))); +#endif + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 
+#define NULL ((void*)0) +#endif + +/** + * Value of maximum non-infinite single-precision floating-point + * number. + */ +#define MAXFLOAT 0x1.fffffep127f + +/** + * A positive float constant expression. HUGE_VALF evaluates + * to +infinity. Used as an error value returned by the built-in + * math functions. + */ +#define HUGE_VALF (__builtin_huge_valf()) + +/** + * A positive double constant expression. HUGE_VAL evaluates + * to +infinity. Used as an error value returned by the built-in + * math functions. + */ +#define HUGE_VAL (__builtin_huge_val()) + +/** + * A constant expression of type float representing positive or + * unsigned infinity. + */ +#define INFINITY (__builtin_inff()) + +/** + * A constant expression of type float representing a quiet NaN. + */ +#define NAN as_float(INT_MAX) + +#define FP_ILOGB0 INT_MIN +#define FP_ILOGBNAN INT_MAX + +#define FLT_DIG 6 +#define FLT_MANT_DIG 24 +#define FLT_MAX_10_EXP +38 +#define FLT_MAX_EXP +128 +#define FLT_MIN_10_EXP -37 +#define FLT_MIN_EXP -125 +#define FLT_RADIX 2 +#define FLT_MAX 0x1.fffffep127f +#define FLT_MIN 0x1.0p-126f +#define FLT_EPSILON 0x1.0p-23f + +#define M_E_F 2.71828182845904523536028747135266250f +#define M_LOG2E_F 1.44269504088896340735992468100189214f +#define M_LOG10E_F 0.434294481903251827651128918916605082f +#define M_LN2_F 0.693147180559945309417232121458176568f +#define M_LN10_F 2.30258509299404568401799145468436421f +#define M_PI_F 3.14159265358979323846264338327950288f +#define M_PI_2_F 1.57079632679489661923132169163975144f +#define M_PI_4_F 0.785398163397448309615660845819875721f +#define M_1_PI_F 0.318309886183790671537767526745028724f +#define M_2_PI_F 0.636619772367581343075535053490057448f +#define M_2_SQRTPI_F 1.12837916709551257389615890312154517f +#define M_SQRT2_F 1.41421356237309504880168872420969808f +#define M_SQRT1_2_F 0.707106781186547524400844362104849039f + +#define DBL_DIG 15 +#define DBL_MANT_DIG 53 +#define DBL_MAX_10_EXP +308 +#define DBL_MAX_EXP +1024 +#define DBL_MIN_10_EXP -307 +#define DBL_MIN_EXP -1021 +#define DBL_RADIX 2 +#define DBL_MAX 0x1.fffffffffffffp1023 +#define DBL_MIN 0x1.0p-1022 +#define DBL_EPSILON 0x1.0p-52 + +#define M_E 0x1.5bf0a8b145769p+1 +#define M_LOG2E 0x1.71547652b82fep+0 +#define M_LOG10E 0x1.bcb7b1526e50ep-2 +#define M_LN2 0x1.62e42fefa39efp-1 +#define M_LN10 0x1.26bb1bbb55516p+1 +#define M_PI 0x1.921fb54442d18p+1 +#define M_PI_2 0x1.921fb54442d18p+0 +#define M_PI_4 0x1.921fb54442d18p-1 +#define M_1_PI 0x1.45f306dc9c883p-2 +#define M_2_PI 0x1.45f306dc9c883p-1 +#define M_2_SQRTPI 0x1.20dd750429b6dp+0 +#define M_SQRT2 0x1.6a09e667f3bcdp+0 +#define M_SQRT1_2 0x1.6a09e667f3bcdp-1 + +#ifdef cl_khr_fp16 + +#define HALF_DIG 3 +#define HALF_MANT_DIG 11 +#define HALF_MAX_10_EXP +4 +#define HALF_MAX_EXP +16 +#define HALF_MIN_10_EXP -4 +#define HALF_MIN_EXP -13 +#define HALF_RADIX 2 +#define HALF_MAX ((0x1.ffcp15h)) +#define HALF_MIN ((0x1.0p-14h)) +#define HALF_EPSILON ((0x1.0p-10h)) + +#define M_E_H 2.71828182845904523536028747135266250h +#define M_LOG2E_H 1.44269504088896340735992468100189214h +#define M_LOG10E_H 0.434294481903251827651128918916605082h +#define M_LN2_H 0.693147180559945309417232121458176568h +#define M_LN10_H 2.30258509299404568401799145468436421h +#define M_PI_H 3.14159265358979323846264338327950288h +#define M_PI_2_H 1.57079632679489661923132169163975144h +#define M_PI_4_H 0.785398163397448309615660845819875721h +#define M_1_PI_H 0.318309886183790671537767526745028724h +#define M_2_PI_H 0.636619772367581343075535053490057448h +#define 
M_2_SQRTPI_H 1.12837916709551257389615890312154517h +#define M_SQRT2_H 1.41421356237309504880168872420969808h +#define M_SQRT1_2_H 0.707106781186547524400844362104849039h + +#endif //cl_khr_fp16 + +#define CHAR_BIT 8 +#define SCHAR_MAX 127 +#define SCHAR_MIN (-128) +#define UCHAR_MAX 255 +#define CHAR_MAX SCHAR_MAX +#define CHAR_MIN SCHAR_MIN +#define USHRT_MAX 65535 +#define SHRT_MAX 32767 +#define SHRT_MIN (-32768) +#define UINT_MAX 0xffffffff +#define INT_MAX 2147483647 +#define INT_MIN (-2147483647-1) +#define ULONG_MAX 0xffffffffffffffffUL +#define LONG_MAX 0x7fffffffffffffffL +#define LONG_MIN (-0x7fffffffffffffffL-1) + +// OpenCL v1.1/1.2/2.0 s6.2.3 - Explicit conversions + +char __ovld __cnfn convert_char_rte(char); +char __ovld __cnfn convert_char_sat_rte(char); +char __ovld __cnfn convert_char_rtz(char); +char __ovld __cnfn convert_char_sat_rtz(char); +char __ovld __cnfn convert_char_rtp(char); +char __ovld __cnfn convert_char_sat_rtp(char); +char __ovld __cnfn convert_char_rtn(char); +char __ovld __cnfn convert_char_sat_rtn(char); +char __ovld __cnfn convert_char(char); +char __ovld __cnfn convert_char_sat(char); +char __ovld __cnfn convert_char_rte(uchar); +char __ovld __cnfn convert_char_sat_rte(uchar); +char __ovld __cnfn convert_char_rtz(uchar); +char __ovld __cnfn convert_char_sat_rtz(uchar); +char __ovld __cnfn convert_char_rtp(uchar); +char __ovld __cnfn convert_char_sat_rtp(uchar); +char __ovld __cnfn convert_char_rtn(uchar); +char __ovld __cnfn convert_char_sat_rtn(uchar); +char __ovld __cnfn convert_char(uchar); +char __ovld __cnfn convert_char_sat(uchar); +char __ovld __cnfn convert_char_rte(short); +char __ovld __cnfn convert_char_sat_rte(short); +char __ovld __cnfn convert_char_rtz(short); +char __ovld __cnfn convert_char_sat_rtz(short); +char __ovld __cnfn convert_char_rtp(short); +char __ovld __cnfn convert_char_sat_rtp(short); +char __ovld __cnfn convert_char_rtn(short); +char __ovld __cnfn convert_char_sat_rtn(short); +char __ovld __cnfn convert_char(short); +char __ovld __cnfn convert_char_sat(short); +char __ovld __cnfn convert_char_rte(ushort); +char __ovld __cnfn convert_char_sat_rte(ushort); +char __ovld __cnfn convert_char_rtz(ushort); +char __ovld __cnfn convert_char_sat_rtz(ushort); +char __ovld __cnfn convert_char_rtp(ushort); +char __ovld __cnfn convert_char_sat_rtp(ushort); +char __ovld __cnfn convert_char_rtn(ushort); +char __ovld __cnfn convert_char_sat_rtn(ushort); +char __ovld __cnfn convert_char(ushort); +char __ovld __cnfn convert_char_sat(ushort); +char __ovld __cnfn convert_char_rte(int); +char __ovld __cnfn convert_char_sat_rte(int); +char __ovld __cnfn convert_char_rtz(int); +char __ovld __cnfn convert_char_sat_rtz(int); +char __ovld __cnfn convert_char_rtp(int); +char __ovld __cnfn convert_char_sat_rtp(int); +char __ovld __cnfn convert_char_rtn(int); +char __ovld __cnfn convert_char_sat_rtn(int); +char __ovld __cnfn convert_char(int); +char __ovld __cnfn convert_char_sat(int); +char __ovld __cnfn convert_char_rte(uint); +char __ovld __cnfn convert_char_sat_rte(uint); +char __ovld __cnfn convert_char_rtz(uint); +char __ovld __cnfn convert_char_sat_rtz(uint); +char __ovld __cnfn convert_char_rtp(uint); +char __ovld __cnfn convert_char_sat_rtp(uint); +char __ovld __cnfn convert_char_rtn(uint); +char __ovld __cnfn convert_char_sat_rtn(uint); +char __ovld __cnfn convert_char(uint); +char __ovld __cnfn convert_char_sat(uint); +char __ovld __cnfn convert_char_rte(long); +char __ovld __cnfn convert_char_sat_rte(long); +char __ovld __cnfn 
convert_char_rtz(long); +char __ovld __cnfn convert_char_sat_rtz(long); +char __ovld __cnfn convert_char_rtp(long); +char __ovld __cnfn convert_char_sat_rtp(long); +char __ovld __cnfn convert_char_rtn(long); +char __ovld __cnfn convert_char_sat_rtn(long); +char __ovld __cnfn convert_char(long); +char __ovld __cnfn convert_char_sat(long); +char __ovld __cnfn convert_char_rte(ulong); +char __ovld __cnfn convert_char_sat_rte(ulong); +char __ovld __cnfn convert_char_rtz(ulong); +char __ovld __cnfn convert_char_sat_rtz(ulong); +char __ovld __cnfn convert_char_rtp(ulong); +char __ovld __cnfn convert_char_sat_rtp(ulong); +char __ovld __cnfn convert_char_rtn(ulong); +char __ovld __cnfn convert_char_sat_rtn(ulong); +char __ovld __cnfn convert_char(ulong); +char __ovld __cnfn convert_char_sat(ulong); +char __ovld __cnfn convert_char_rte(float); +char __ovld __cnfn convert_char_sat_rte(float); +char __ovld __cnfn convert_char_rtz(float); +char __ovld __cnfn convert_char_sat_rtz(float); +char __ovld __cnfn convert_char_rtp(float); +char __ovld __cnfn convert_char_sat_rtp(float); +char __ovld __cnfn convert_char_rtn(float); +char __ovld __cnfn convert_char_sat_rtn(float); +char __ovld __cnfn convert_char(float); +char __ovld __cnfn convert_char_sat(float); +uchar __ovld __cnfn convert_uchar_rte(char); +uchar __ovld __cnfn convert_uchar_sat_rte(char); +uchar __ovld __cnfn convert_uchar_rtz(char); +uchar __ovld __cnfn convert_uchar_sat_rtz(char); +uchar __ovld __cnfn convert_uchar_rtp(char); +uchar __ovld __cnfn convert_uchar_sat_rtp(char); +uchar __ovld __cnfn convert_uchar_rtn(char); +uchar __ovld __cnfn convert_uchar_sat_rtn(char); +uchar __ovld __cnfn convert_uchar(char); +uchar __ovld __cnfn convert_uchar_sat(char); +uchar __ovld __cnfn convert_uchar_rte(uchar); +uchar __ovld __cnfn convert_uchar_sat_rte(uchar); +uchar __ovld __cnfn convert_uchar_rtz(uchar); +uchar __ovld __cnfn convert_uchar_sat_rtz(uchar); +uchar __ovld __cnfn convert_uchar_rtp(uchar); +uchar __ovld __cnfn convert_uchar_sat_rtp(uchar); +uchar __ovld __cnfn convert_uchar_rtn(uchar); +uchar __ovld __cnfn convert_uchar_sat_rtn(uchar); +uchar __ovld __cnfn convert_uchar(uchar); +uchar __ovld __cnfn convert_uchar_sat(uchar); +uchar __ovld __cnfn convert_uchar_rte(short); +uchar __ovld __cnfn convert_uchar_sat_rte(short); +uchar __ovld __cnfn convert_uchar_rtz(short); +uchar __ovld __cnfn convert_uchar_sat_rtz(short); +uchar __ovld __cnfn convert_uchar_rtp(short); +uchar __ovld __cnfn convert_uchar_sat_rtp(short); +uchar __ovld __cnfn convert_uchar_rtn(short); +uchar __ovld __cnfn convert_uchar_sat_rtn(short); +uchar __ovld __cnfn convert_uchar(short); +uchar __ovld __cnfn convert_uchar_sat(short); +uchar __ovld __cnfn convert_uchar_rte(ushort); +uchar __ovld __cnfn convert_uchar_sat_rte(ushort); +uchar __ovld __cnfn convert_uchar_rtz(ushort); +uchar __ovld __cnfn convert_uchar_sat_rtz(ushort); +uchar __ovld __cnfn convert_uchar_rtp(ushort); +uchar __ovld __cnfn convert_uchar_sat_rtp(ushort); +uchar __ovld __cnfn convert_uchar_rtn(ushort); +uchar __ovld __cnfn convert_uchar_sat_rtn(ushort); +uchar __ovld __cnfn convert_uchar(ushort); +uchar __ovld __cnfn convert_uchar_sat(ushort); +uchar __ovld __cnfn convert_uchar_rte(int); +uchar __ovld __cnfn convert_uchar_sat_rte(int); +uchar __ovld __cnfn convert_uchar_rtz(int); +uchar __ovld __cnfn convert_uchar_sat_rtz(int); +uchar __ovld __cnfn convert_uchar_rtp(int); +uchar __ovld __cnfn convert_uchar_sat_rtp(int); +uchar __ovld __cnfn convert_uchar_rtn(int); +uchar __ovld __cnfn 
convert_uchar_sat_rtn(int); +uchar __ovld __cnfn convert_uchar(int); +uchar __ovld __cnfn convert_uchar_sat(int); +uchar __ovld __cnfn convert_uchar_rte(uint); +uchar __ovld __cnfn convert_uchar_sat_rte(uint); +uchar __ovld __cnfn convert_uchar_rtz(uint); +uchar __ovld __cnfn convert_uchar_sat_rtz(uint); +uchar __ovld __cnfn convert_uchar_rtp(uint); +uchar __ovld __cnfn convert_uchar_sat_rtp(uint); +uchar __ovld __cnfn convert_uchar_rtn(uint); +uchar __ovld __cnfn convert_uchar_sat_rtn(uint); +uchar __ovld __cnfn convert_uchar(uint); +uchar __ovld __cnfn convert_uchar_sat(uint); +uchar __ovld __cnfn convert_uchar_rte(long); +uchar __ovld __cnfn convert_uchar_sat_rte(long); +uchar __ovld __cnfn convert_uchar_rtz(long); +uchar __ovld __cnfn convert_uchar_sat_rtz(long); +uchar __ovld __cnfn convert_uchar_rtp(long); +uchar __ovld __cnfn convert_uchar_sat_rtp(long); +uchar __ovld __cnfn convert_uchar_rtn(long); +uchar __ovld __cnfn convert_uchar_sat_rtn(long); +uchar __ovld __cnfn convert_uchar(long); +uchar __ovld __cnfn convert_uchar_sat(long); +uchar __ovld __cnfn convert_uchar_rte(ulong); +uchar __ovld __cnfn convert_uchar_sat_rte(ulong); +uchar __ovld __cnfn convert_uchar_rtz(ulong); +uchar __ovld __cnfn convert_uchar_sat_rtz(ulong); +uchar __ovld __cnfn convert_uchar_rtp(ulong); +uchar __ovld __cnfn convert_uchar_sat_rtp(ulong); +uchar __ovld __cnfn convert_uchar_rtn(ulong); +uchar __ovld __cnfn convert_uchar_sat_rtn(ulong); +uchar __ovld __cnfn convert_uchar(ulong); +uchar __ovld __cnfn convert_uchar_sat(ulong); +uchar __ovld __cnfn convert_uchar_rte(float); +uchar __ovld __cnfn convert_uchar_sat_rte(float); +uchar __ovld __cnfn convert_uchar_rtz(float); +uchar __ovld __cnfn convert_uchar_sat_rtz(float); +uchar __ovld __cnfn convert_uchar_rtp(float); +uchar __ovld __cnfn convert_uchar_sat_rtp(float); +uchar __ovld __cnfn convert_uchar_rtn(float); +uchar __ovld __cnfn convert_uchar_sat_rtn(float); +uchar __ovld __cnfn convert_uchar(float); +uchar __ovld __cnfn convert_uchar_sat(float); + +short __ovld __cnfn convert_short_rte(char); +short __ovld __cnfn convert_short_sat_rte(char); +short __ovld __cnfn convert_short_rtz(char); +short __ovld __cnfn convert_short_sat_rtz(char); +short __ovld __cnfn convert_short_rtp(char); +short __ovld __cnfn convert_short_sat_rtp(char); +short __ovld __cnfn convert_short_rtn(char); +short __ovld __cnfn convert_short_sat_rtn(char); +short __ovld __cnfn convert_short(char); +short __ovld __cnfn convert_short_sat(char); +short __ovld __cnfn convert_short_rte(uchar); +short __ovld __cnfn convert_short_sat_rte(uchar); +short __ovld __cnfn convert_short_rtz(uchar); +short __ovld __cnfn convert_short_sat_rtz(uchar); +short __ovld __cnfn convert_short_rtp(uchar); +short __ovld __cnfn convert_short_sat_rtp(uchar); +short __ovld __cnfn convert_short_rtn(uchar); +short __ovld __cnfn convert_short_sat_rtn(uchar); +short __ovld __cnfn convert_short(uchar); +short __ovld __cnfn convert_short_sat(uchar); +short __ovld __cnfn convert_short_rte(short); +short __ovld __cnfn convert_short_sat_rte(short); +short __ovld __cnfn convert_short_rtz(short); +short __ovld __cnfn convert_short_sat_rtz(short); +short __ovld __cnfn convert_short_rtp(short); +short __ovld __cnfn convert_short_sat_rtp(short); +short __ovld __cnfn convert_short_rtn(short); +short __ovld __cnfn convert_short_sat_rtn(short); +short __ovld __cnfn convert_short(short); +short __ovld __cnfn convert_short_sat(short); +short __ovld __cnfn convert_short_rte(ushort); +short __ovld __cnfn 
convert_short_sat_rte(ushort); +short __ovld __cnfn convert_short_rtz(ushort); +short __ovld __cnfn convert_short_sat_rtz(ushort); +short __ovld __cnfn convert_short_rtp(ushort); +short __ovld __cnfn convert_short_sat_rtp(ushort); +short __ovld __cnfn convert_short_rtn(ushort); +short __ovld __cnfn convert_short_sat_rtn(ushort); +short __ovld __cnfn convert_short(ushort); +short __ovld __cnfn convert_short_sat(ushort); +short __ovld __cnfn convert_short_rte(int); +short __ovld __cnfn convert_short_sat_rte(int); +short __ovld __cnfn convert_short_rtz(int); +short __ovld __cnfn convert_short_sat_rtz(int); +short __ovld __cnfn convert_short_rtp(int); +short __ovld __cnfn convert_short_sat_rtp(int); +short __ovld __cnfn convert_short_rtn(int); +short __ovld __cnfn convert_short_sat_rtn(int); +short __ovld __cnfn convert_short(int); +short __ovld __cnfn convert_short_sat(int); +short __ovld __cnfn convert_short_rte(uint); +short __ovld __cnfn convert_short_sat_rte(uint); +short __ovld __cnfn convert_short_rtz(uint); +short __ovld __cnfn convert_short_sat_rtz(uint); +short __ovld __cnfn convert_short_rtp(uint); +short __ovld __cnfn convert_short_sat_rtp(uint); +short __ovld __cnfn convert_short_rtn(uint); +short __ovld __cnfn convert_short_sat_rtn(uint); +short __ovld __cnfn convert_short(uint); +short __ovld __cnfn convert_short_sat(uint); +short __ovld __cnfn convert_short_rte(long); +short __ovld __cnfn convert_short_sat_rte(long); +short __ovld __cnfn convert_short_rtz(long); +short __ovld __cnfn convert_short_sat_rtz(long); +short __ovld __cnfn convert_short_rtp(long); +short __ovld __cnfn convert_short_sat_rtp(long); +short __ovld __cnfn convert_short_rtn(long); +short __ovld __cnfn convert_short_sat_rtn(long); +short __ovld __cnfn convert_short(long); +short __ovld __cnfn convert_short_sat(long); +short __ovld __cnfn convert_short_rte(ulong); +short __ovld __cnfn convert_short_sat_rte(ulong); +short __ovld __cnfn convert_short_rtz(ulong); +short __ovld __cnfn convert_short_sat_rtz(ulong); +short __ovld __cnfn convert_short_rtp(ulong); +short __ovld __cnfn convert_short_sat_rtp(ulong); +short __ovld __cnfn convert_short_rtn(ulong); +short __ovld __cnfn convert_short_sat_rtn(ulong); +short __ovld __cnfn convert_short(ulong); +short __ovld __cnfn convert_short_sat(ulong); +short __ovld __cnfn convert_short_rte(float); +short __ovld __cnfn convert_short_sat_rte(float); +short __ovld __cnfn convert_short_rtz(float); +short __ovld __cnfn convert_short_sat_rtz(float); +short __ovld __cnfn convert_short_rtp(float); +short __ovld __cnfn convert_short_sat_rtp(float); +short __ovld __cnfn convert_short_rtn(float); +short __ovld __cnfn convert_short_sat_rtn(float); +short __ovld __cnfn convert_short(float); +short __ovld __cnfn convert_short_sat(float); +ushort __ovld __cnfn convert_ushort_rte(char); +ushort __ovld __cnfn convert_ushort_sat_rte(char); +ushort __ovld __cnfn convert_ushort_rtz(char); +ushort __ovld __cnfn convert_ushort_sat_rtz(char); +ushort __ovld __cnfn convert_ushort_rtp(char); +ushort __ovld __cnfn convert_ushort_sat_rtp(char); +ushort __ovld __cnfn convert_ushort_rtn(char); +ushort __ovld __cnfn convert_ushort_sat_rtn(char); +ushort __ovld __cnfn convert_ushort(char); +ushort __ovld __cnfn convert_ushort_sat(char); +ushort __ovld __cnfn convert_ushort_rte(uchar); +ushort __ovld __cnfn convert_ushort_sat_rte(uchar); +ushort __ovld __cnfn convert_ushort_rtz(uchar); +ushort __ovld __cnfn convert_ushort_sat_rtz(uchar); +ushort __ovld __cnfn convert_ushort_rtp(uchar); +ushort __ovld 
__cnfn convert_ushort_sat_rtp(uchar); +ushort __ovld __cnfn convert_ushort_rtn(uchar); +ushort __ovld __cnfn convert_ushort_sat_rtn(uchar); +ushort __ovld __cnfn convert_ushort(uchar); +ushort __ovld __cnfn convert_ushort_sat(uchar); +ushort __ovld __cnfn convert_ushort_rte(short); +ushort __ovld __cnfn convert_ushort_sat_rte(short); +ushort __ovld __cnfn convert_ushort_rtz(short); +ushort __ovld __cnfn convert_ushort_sat_rtz(short); +ushort __ovld __cnfn convert_ushort_rtp(short); +ushort __ovld __cnfn convert_ushort_sat_rtp(short); +ushort __ovld __cnfn convert_ushort_rtn(short); +ushort __ovld __cnfn convert_ushort_sat_rtn(short); +ushort __ovld __cnfn convert_ushort(short); +ushort __ovld __cnfn convert_ushort_sat(short); +ushort __ovld __cnfn convert_ushort_rte(ushort); +ushort __ovld __cnfn convert_ushort_sat_rte(ushort); +ushort __ovld __cnfn convert_ushort_rtz(ushort); +ushort __ovld __cnfn convert_ushort_sat_rtz(ushort); +ushort __ovld __cnfn convert_ushort_rtp(ushort); +ushort __ovld __cnfn convert_ushort_sat_rtp(ushort); +ushort __ovld __cnfn convert_ushort_rtn(ushort); +ushort __ovld __cnfn convert_ushort_sat_rtn(ushort); +ushort __ovld __cnfn convert_ushort(ushort); +ushort __ovld __cnfn convert_ushort_sat(ushort); +ushort __ovld __cnfn convert_ushort_rte(int); +ushort __ovld __cnfn convert_ushort_sat_rte(int); +ushort __ovld __cnfn convert_ushort_rtz(int); +ushort __ovld __cnfn convert_ushort_sat_rtz(int); +ushort __ovld __cnfn convert_ushort_rtp(int); +ushort __ovld __cnfn convert_ushort_sat_rtp(int); +ushort __ovld __cnfn convert_ushort_rtn(int); +ushort __ovld __cnfn convert_ushort_sat_rtn(int); +ushort __ovld __cnfn convert_ushort(int); +ushort __ovld __cnfn convert_ushort_sat(int); +ushort __ovld __cnfn convert_ushort_rte(uint); +ushort __ovld __cnfn convert_ushort_sat_rte(uint); +ushort __ovld __cnfn convert_ushort_rtz(uint); +ushort __ovld __cnfn convert_ushort_sat_rtz(uint); +ushort __ovld __cnfn convert_ushort_rtp(uint); +ushort __ovld __cnfn convert_ushort_sat_rtp(uint); +ushort __ovld __cnfn convert_ushort_rtn(uint); +ushort __ovld __cnfn convert_ushort_sat_rtn(uint); +ushort __ovld __cnfn convert_ushort(uint); +ushort __ovld __cnfn convert_ushort_sat(uint); +ushort __ovld __cnfn convert_ushort_rte(long); +ushort __ovld __cnfn convert_ushort_sat_rte(long); +ushort __ovld __cnfn convert_ushort_rtz(long); +ushort __ovld __cnfn convert_ushort_sat_rtz(long); +ushort __ovld __cnfn convert_ushort_rtp(long); +ushort __ovld __cnfn convert_ushort_sat_rtp(long); +ushort __ovld __cnfn convert_ushort_rtn(long); +ushort __ovld __cnfn convert_ushort_sat_rtn(long); +ushort __ovld __cnfn convert_ushort(long); +ushort __ovld __cnfn convert_ushort_sat(long); +ushort __ovld __cnfn convert_ushort_rte(ulong); +ushort __ovld __cnfn convert_ushort_sat_rte(ulong); +ushort __ovld __cnfn convert_ushort_rtz(ulong); +ushort __ovld __cnfn convert_ushort_sat_rtz(ulong); +ushort __ovld __cnfn convert_ushort_rtp(ulong); +ushort __ovld __cnfn convert_ushort_sat_rtp(ulong); +ushort __ovld __cnfn convert_ushort_rtn(ulong); +ushort __ovld __cnfn convert_ushort_sat_rtn(ulong); +ushort __ovld __cnfn convert_ushort(ulong); +ushort __ovld __cnfn convert_ushort_sat(ulong); +ushort __ovld __cnfn convert_ushort_rte(float); +ushort __ovld __cnfn convert_ushort_sat_rte(float); +ushort __ovld __cnfn convert_ushort_rtz(float); +ushort __ovld __cnfn convert_ushort_sat_rtz(float); +ushort __ovld __cnfn convert_ushort_rtp(float); +ushort __ovld __cnfn convert_ushort_sat_rtp(float); +ushort __ovld __cnfn 
convert_ushort_rtn(float); +ushort __ovld __cnfn convert_ushort_sat_rtn(float); +ushort __ovld __cnfn convert_ushort(float); +ushort __ovld __cnfn convert_ushort_sat(float); +int __ovld __cnfn convert_int_rte(char); +int __ovld __cnfn convert_int_sat_rte(char); +int __ovld __cnfn convert_int_rtz(char); +int __ovld __cnfn convert_int_sat_rtz(char); +int __ovld __cnfn convert_int_rtp(char); +int __ovld __cnfn convert_int_sat_rtp(char); +int __ovld __cnfn convert_int_rtn(char); +int __ovld __cnfn convert_int_sat_rtn(char); +int __ovld __cnfn convert_int(char); +int __ovld __cnfn convert_int_sat(char); +int __ovld __cnfn convert_int_rte(uchar); +int __ovld __cnfn convert_int_sat_rte(uchar); +int __ovld __cnfn convert_int_rtz(uchar); +int __ovld __cnfn convert_int_sat_rtz(uchar); +int __ovld __cnfn convert_int_rtp(uchar); +int __ovld __cnfn convert_int_sat_rtp(uchar); +int __ovld __cnfn convert_int_rtn(uchar); +int __ovld __cnfn convert_int_sat_rtn(uchar); +int __ovld __cnfn convert_int(uchar); +int __ovld __cnfn convert_int_sat(uchar); +int __ovld __cnfn convert_int_rte(short); +int __ovld __cnfn convert_int_sat_rte(short); +int __ovld __cnfn convert_int_rtz(short); +int __ovld __cnfn convert_int_sat_rtz(short); +int __ovld __cnfn convert_int_rtp(short); +int __ovld __cnfn convert_int_sat_rtp(short); +int __ovld __cnfn convert_int_rtn(short); +int __ovld __cnfn convert_int_sat_rtn(short); +int __ovld __cnfn convert_int(short); +int __ovld __cnfn convert_int_sat(short); +int __ovld __cnfn convert_int_rte(ushort); +int __ovld __cnfn convert_int_sat_rte(ushort); +int __ovld __cnfn convert_int_rtz(ushort); +int __ovld __cnfn convert_int_sat_rtz(ushort); +int __ovld __cnfn convert_int_rtp(ushort); +int __ovld __cnfn convert_int_sat_rtp(ushort); +int __ovld __cnfn convert_int_rtn(ushort); +int __ovld __cnfn convert_int_sat_rtn(ushort); +int __ovld __cnfn convert_int(ushort); +int __ovld __cnfn convert_int_sat(ushort); +int __ovld __cnfn convert_int_rte(int); +int __ovld __cnfn convert_int_sat_rte(int); +int __ovld __cnfn convert_int_rtz(int); +int __ovld __cnfn convert_int_sat_rtz(int); +int __ovld __cnfn convert_int_rtp(int); +int __ovld __cnfn convert_int_sat_rtp(int); +int __ovld __cnfn convert_int_rtn(int); +int __ovld __cnfn convert_int_sat_rtn(int); +int __ovld __cnfn convert_int(int); +int __ovld __cnfn convert_int_sat(int); +int __ovld __cnfn convert_int_rte(uint); +int __ovld __cnfn convert_int_sat_rte(uint); +int __ovld __cnfn convert_int_rtz(uint); +int __ovld __cnfn convert_int_sat_rtz(uint); +int __ovld __cnfn convert_int_rtp(uint); +int __ovld __cnfn convert_int_sat_rtp(uint); +int __ovld __cnfn convert_int_rtn(uint); +int __ovld __cnfn convert_int_sat_rtn(uint); +int __ovld __cnfn convert_int(uint); +int __ovld __cnfn convert_int_sat(uint); +int __ovld __cnfn convert_int_rte(long); +int __ovld __cnfn convert_int_sat_rte(long); +int __ovld __cnfn convert_int_rtz(long); +int __ovld __cnfn convert_int_sat_rtz(long); +int __ovld __cnfn convert_int_rtp(long); +int __ovld __cnfn convert_int_sat_rtp(long); +int __ovld __cnfn convert_int_rtn(long); +int __ovld __cnfn convert_int_sat_rtn(long); +int __ovld __cnfn convert_int(long); +int __ovld __cnfn convert_int_sat(long); +int __ovld __cnfn convert_int_rte(ulong); +int __ovld __cnfn convert_int_sat_rte(ulong); +int __ovld __cnfn convert_int_rtz(ulong); +int __ovld __cnfn convert_int_sat_rtz(ulong); +int __ovld __cnfn convert_int_rtp(ulong); +int __ovld __cnfn convert_int_sat_rtp(ulong); +int __ovld __cnfn convert_int_rtn(ulong); +int __ovld 
__cnfn convert_int_sat_rtn(ulong); +int __ovld __cnfn convert_int(ulong); +int __ovld __cnfn convert_int_sat(ulong); +int __ovld __cnfn convert_int_rte(float); +int __ovld __cnfn convert_int_sat_rte(float); +int __ovld __cnfn convert_int_rtz(float); +int __ovld __cnfn convert_int_sat_rtz(float); +int __ovld __cnfn convert_int_rtp(float); +int __ovld __cnfn convert_int_sat_rtp(float); +int __ovld __cnfn convert_int_rtn(float); +int __ovld __cnfn convert_int_sat_rtn(float); +int __ovld __cnfn convert_int(float); +int __ovld __cnfn convert_int_sat(float); +uint __ovld __cnfn convert_uint_rte(char); +uint __ovld __cnfn convert_uint_sat_rte(char); +uint __ovld __cnfn convert_uint_rtz(char); +uint __ovld __cnfn convert_uint_sat_rtz(char); +uint __ovld __cnfn convert_uint_rtp(char); +uint __ovld __cnfn convert_uint_sat_rtp(char); +uint __ovld __cnfn convert_uint_rtn(char); +uint __ovld __cnfn convert_uint_sat_rtn(char); +uint __ovld __cnfn convert_uint(char); +uint __ovld __cnfn convert_uint_sat(char); +uint __ovld __cnfn convert_uint_rte(uchar); +uint __ovld __cnfn convert_uint_sat_rte(uchar); +uint __ovld __cnfn convert_uint_rtz(uchar); +uint __ovld __cnfn convert_uint_sat_rtz(uchar); +uint __ovld __cnfn convert_uint_rtp(uchar); +uint __ovld __cnfn convert_uint_sat_rtp(uchar); +uint __ovld __cnfn convert_uint_rtn(uchar); +uint __ovld __cnfn convert_uint_sat_rtn(uchar); +uint __ovld __cnfn convert_uint(uchar); +uint __ovld __cnfn convert_uint_sat(uchar); +uint __ovld __cnfn convert_uint_rte(short); +uint __ovld __cnfn convert_uint_sat_rte(short); +uint __ovld __cnfn convert_uint_rtz(short); +uint __ovld __cnfn convert_uint_sat_rtz(short); +uint __ovld __cnfn convert_uint_rtp(short); +uint __ovld __cnfn convert_uint_sat_rtp(short); +uint __ovld __cnfn convert_uint_rtn(short); +uint __ovld __cnfn convert_uint_sat_rtn(short); +uint __ovld __cnfn convert_uint(short); +uint __ovld __cnfn convert_uint_sat(short); +uint __ovld __cnfn convert_uint_rte(ushort); +uint __ovld __cnfn convert_uint_sat_rte(ushort); +uint __ovld __cnfn convert_uint_rtz(ushort); +uint __ovld __cnfn convert_uint_sat_rtz(ushort); +uint __ovld __cnfn convert_uint_rtp(ushort); +uint __ovld __cnfn convert_uint_sat_rtp(ushort); +uint __ovld __cnfn convert_uint_rtn(ushort); +uint __ovld __cnfn convert_uint_sat_rtn(ushort); +uint __ovld __cnfn convert_uint(ushort); +uint __ovld __cnfn convert_uint_sat(ushort); +uint __ovld __cnfn convert_uint_rte(int); +uint __ovld __cnfn convert_uint_sat_rte(int); +uint __ovld __cnfn convert_uint_rtz(int); +uint __ovld __cnfn convert_uint_sat_rtz(int); +uint __ovld __cnfn convert_uint_rtp(int); +uint __ovld __cnfn convert_uint_sat_rtp(int); +uint __ovld __cnfn convert_uint_rtn(int); +uint __ovld __cnfn convert_uint_sat_rtn(int); +uint __ovld __cnfn convert_uint(int); +uint __ovld __cnfn convert_uint_sat(int); +uint __ovld __cnfn convert_uint_rte(uint); +uint __ovld __cnfn convert_uint_sat_rte(uint); +uint __ovld __cnfn convert_uint_rtz(uint); +uint __ovld __cnfn convert_uint_sat_rtz(uint); +uint __ovld __cnfn convert_uint_rtp(uint); +uint __ovld __cnfn convert_uint_sat_rtp(uint); +uint __ovld __cnfn convert_uint_rtn(uint); +uint __ovld __cnfn convert_uint_sat_rtn(uint); +uint __ovld __cnfn convert_uint(uint); +uint __ovld __cnfn convert_uint_sat(uint); +uint __ovld __cnfn convert_uint_rte(long); +uint __ovld __cnfn convert_uint_sat_rte(long); +uint __ovld __cnfn convert_uint_rtz(long); +uint __ovld __cnfn convert_uint_sat_rtz(long); +uint __ovld __cnfn convert_uint_rtp(long); +uint __ovld __cnfn 
convert_uint_sat_rtp(long); +uint __ovld __cnfn convert_uint_rtn(long); +uint __ovld __cnfn convert_uint_sat_rtn(long); +uint __ovld __cnfn convert_uint(long); +uint __ovld __cnfn convert_uint_sat(long); +uint __ovld __cnfn convert_uint_rte(ulong); +uint __ovld __cnfn convert_uint_sat_rte(ulong); +uint __ovld __cnfn convert_uint_rtz(ulong); +uint __ovld __cnfn convert_uint_sat_rtz(ulong); +uint __ovld __cnfn convert_uint_rtp(ulong); +uint __ovld __cnfn convert_uint_sat_rtp(ulong); +uint __ovld __cnfn convert_uint_rtn(ulong); +uint __ovld __cnfn convert_uint_sat_rtn(ulong); +uint __ovld __cnfn convert_uint(ulong); +uint __ovld __cnfn convert_uint_sat(ulong); +uint __ovld __cnfn convert_uint_rte(float); +uint __ovld __cnfn convert_uint_sat_rte(float); +uint __ovld __cnfn convert_uint_rtz(float); +uint __ovld __cnfn convert_uint_sat_rtz(float); +uint __ovld __cnfn convert_uint_rtp(float); +uint __ovld __cnfn convert_uint_sat_rtp(float); +uint __ovld __cnfn convert_uint_rtn(float); +uint __ovld __cnfn convert_uint_sat_rtn(float); +uint __ovld __cnfn convert_uint(float); +uint __ovld __cnfn convert_uint_sat(float); +long __ovld __cnfn convert_long_rte(char); +long __ovld __cnfn convert_long_sat_rte(char); +long __ovld __cnfn convert_long_rtz(char); +long __ovld __cnfn convert_long_sat_rtz(char); +long __ovld __cnfn convert_long_rtp(char); +long __ovld __cnfn convert_long_sat_rtp(char); +long __ovld __cnfn convert_long_rtn(char); +long __ovld __cnfn convert_long_sat_rtn(char); +long __ovld __cnfn convert_long(char); +long __ovld __cnfn convert_long_sat(char); +long __ovld __cnfn convert_long_rte(uchar); +long __ovld __cnfn convert_long_sat_rte(uchar); +long __ovld __cnfn convert_long_rtz(uchar); +long __ovld __cnfn convert_long_sat_rtz(uchar); +long __ovld __cnfn convert_long_rtp(uchar); +long __ovld __cnfn convert_long_sat_rtp(uchar); +long __ovld __cnfn convert_long_rtn(uchar); +long __ovld __cnfn convert_long_sat_rtn(uchar); +long __ovld __cnfn convert_long(uchar); +long __ovld __cnfn convert_long_sat(uchar); +long __ovld __cnfn convert_long_rte(short); +long __ovld __cnfn convert_long_sat_rte(short); +long __ovld __cnfn convert_long_rtz(short); +long __ovld __cnfn convert_long_sat_rtz(short); +long __ovld __cnfn convert_long_rtp(short); +long __ovld __cnfn convert_long_sat_rtp(short); +long __ovld __cnfn convert_long_rtn(short); +long __ovld __cnfn convert_long_sat_rtn(short); +long __ovld __cnfn convert_long(short); +long __ovld __cnfn convert_long_sat(short); +long __ovld __cnfn convert_long_rte(ushort); +long __ovld __cnfn convert_long_sat_rte(ushort); +long __ovld __cnfn convert_long_rtz(ushort); +long __ovld __cnfn convert_long_sat_rtz(ushort); +long __ovld __cnfn convert_long_rtp(ushort); +long __ovld __cnfn convert_long_sat_rtp(ushort); +long __ovld __cnfn convert_long_rtn(ushort); +long __ovld __cnfn convert_long_sat_rtn(ushort); +long __ovld __cnfn convert_long(ushort); +long __ovld __cnfn convert_long_sat(ushort); +long __ovld __cnfn convert_long_rte(int); +long __ovld __cnfn convert_long_sat_rte(int); +long __ovld __cnfn convert_long_rtz(int); +long __ovld __cnfn convert_long_sat_rtz(int); +long __ovld __cnfn convert_long_rtp(int); +long __ovld __cnfn convert_long_sat_rtp(int); +long __ovld __cnfn convert_long_rtn(int); +long __ovld __cnfn convert_long_sat_rtn(int); +long __ovld __cnfn convert_long(int); +long __ovld __cnfn convert_long_sat(int); +long __ovld __cnfn convert_long_rte(uint); +long __ovld __cnfn convert_long_sat_rte(uint); +long __ovld __cnfn convert_long_rtz(uint); 
+long __ovld __cnfn convert_long_sat_rtz(uint); +long __ovld __cnfn convert_long_rtp(uint); +long __ovld __cnfn convert_long_sat_rtp(uint); +long __ovld __cnfn convert_long_rtn(uint); +long __ovld __cnfn convert_long_sat_rtn(uint); +long __ovld __cnfn convert_long(uint); +long __ovld __cnfn convert_long_sat(uint); +long __ovld __cnfn convert_long_rte(long); +long __ovld __cnfn convert_long_sat_rte(long); +long __ovld __cnfn convert_long_rtz(long); +long __ovld __cnfn convert_long_sat_rtz(long); +long __ovld __cnfn convert_long_rtp(long); +long __ovld __cnfn convert_long_sat_rtp(long); +long __ovld __cnfn convert_long_rtn(long); +long __ovld __cnfn convert_long_sat_rtn(long); +long __ovld __cnfn convert_long(long); +long __ovld __cnfn convert_long_sat(long); +long __ovld __cnfn convert_long_rte(ulong); +long __ovld __cnfn convert_long_sat_rte(ulong); +long __ovld __cnfn convert_long_rtz(ulong); +long __ovld __cnfn convert_long_sat_rtz(ulong); +long __ovld __cnfn convert_long_rtp(ulong); +long __ovld __cnfn convert_long_sat_rtp(ulong); +long __ovld __cnfn convert_long_rtn(ulong); +long __ovld __cnfn convert_long_sat_rtn(ulong); +long __ovld __cnfn convert_long(ulong); +long __ovld __cnfn convert_long_sat(ulong); +long __ovld __cnfn convert_long_rte(float); +long __ovld __cnfn convert_long_sat_rte(float); +long __ovld __cnfn convert_long_rtz(float); +long __ovld __cnfn convert_long_sat_rtz(float); +long __ovld __cnfn convert_long_rtp(float); +long __ovld __cnfn convert_long_sat_rtp(float); +long __ovld __cnfn convert_long_rtn(float); +long __ovld __cnfn convert_long_sat_rtn(float); +long __ovld __cnfn convert_long(float); +long __ovld __cnfn convert_long_sat(float); +ulong __ovld __cnfn convert_ulong_rte(char); +ulong __ovld __cnfn convert_ulong_sat_rte(char); +ulong __ovld __cnfn convert_ulong_rtz(char); +ulong __ovld __cnfn convert_ulong_sat_rtz(char); +ulong __ovld __cnfn convert_ulong_rtp(char); +ulong __ovld __cnfn convert_ulong_sat_rtp(char); +ulong __ovld __cnfn convert_ulong_rtn(char); +ulong __ovld __cnfn convert_ulong_sat_rtn(char); +ulong __ovld __cnfn convert_ulong(char); +ulong __ovld __cnfn convert_ulong_sat(char); +ulong __ovld __cnfn convert_ulong_rte(uchar); +ulong __ovld __cnfn convert_ulong_sat_rte(uchar); +ulong __ovld __cnfn convert_ulong_rtz(uchar); +ulong __ovld __cnfn convert_ulong_sat_rtz(uchar); +ulong __ovld __cnfn convert_ulong_rtp(uchar); +ulong __ovld __cnfn convert_ulong_sat_rtp(uchar); +ulong __ovld __cnfn convert_ulong_rtn(uchar); +ulong __ovld __cnfn convert_ulong_sat_rtn(uchar); +ulong __ovld __cnfn convert_ulong(uchar); +ulong __ovld __cnfn convert_ulong_sat(uchar); +ulong __ovld __cnfn convert_ulong_rte(short); +ulong __ovld __cnfn convert_ulong_sat_rte(short); +ulong __ovld __cnfn convert_ulong_rtz(short); +ulong __ovld __cnfn convert_ulong_sat_rtz(short); +ulong __ovld __cnfn convert_ulong_rtp(short); +ulong __ovld __cnfn convert_ulong_sat_rtp(short); +ulong __ovld __cnfn convert_ulong_rtn(short); +ulong __ovld __cnfn convert_ulong_sat_rtn(short); +ulong __ovld __cnfn convert_ulong(short); +ulong __ovld __cnfn convert_ulong_sat(short); +ulong __ovld __cnfn convert_ulong_rte(ushort); +ulong __ovld __cnfn convert_ulong_sat_rte(ushort); +ulong __ovld __cnfn convert_ulong_rtz(ushort); +ulong __ovld __cnfn convert_ulong_sat_rtz(ushort); +ulong __ovld __cnfn convert_ulong_rtp(ushort); +ulong __ovld __cnfn convert_ulong_sat_rtp(ushort); +ulong __ovld __cnfn convert_ulong_rtn(ushort); +ulong __ovld __cnfn convert_ulong_sat_rtn(ushort); +ulong __ovld __cnfn 
convert_ulong(ushort); +ulong __ovld __cnfn convert_ulong_sat(ushort); +ulong __ovld __cnfn convert_ulong_rte(int); +ulong __ovld __cnfn convert_ulong_sat_rte(int); +ulong __ovld __cnfn convert_ulong_rtz(int); +ulong __ovld __cnfn convert_ulong_sat_rtz(int); +ulong __ovld __cnfn convert_ulong_rtp(int); +ulong __ovld __cnfn convert_ulong_sat_rtp(int); +ulong __ovld __cnfn convert_ulong_rtn(int); +ulong __ovld __cnfn convert_ulong_sat_rtn(int); +ulong __ovld __cnfn convert_ulong(int); +ulong __ovld __cnfn convert_ulong_sat(int); +ulong __ovld __cnfn convert_ulong_rte(uint); +ulong __ovld __cnfn convert_ulong_sat_rte(uint); +ulong __ovld __cnfn convert_ulong_rtz(uint); +ulong __ovld __cnfn convert_ulong_sat_rtz(uint); +ulong __ovld __cnfn convert_ulong_rtp(uint); +ulong __ovld __cnfn convert_ulong_sat_rtp(uint); +ulong __ovld __cnfn convert_ulong_rtn(uint); +ulong __ovld __cnfn convert_ulong_sat_rtn(uint); +ulong __ovld __cnfn convert_ulong(uint); +ulong __ovld __cnfn convert_ulong_sat(uint); +ulong __ovld __cnfn convert_ulong_rte(long); +ulong __ovld __cnfn convert_ulong_sat_rte(long); +ulong __ovld __cnfn convert_ulong_rtz(long); +ulong __ovld __cnfn convert_ulong_sat_rtz(long); +ulong __ovld __cnfn convert_ulong_rtp(long); +ulong __ovld __cnfn convert_ulong_sat_rtp(long); +ulong __ovld __cnfn convert_ulong_rtn(long); +ulong __ovld __cnfn convert_ulong_sat_rtn(long); +ulong __ovld __cnfn convert_ulong(long); +ulong __ovld __cnfn convert_ulong_sat(long); +ulong __ovld __cnfn convert_ulong_rte(ulong); +ulong __ovld __cnfn convert_ulong_sat_rte(ulong); +ulong __ovld __cnfn convert_ulong_rtz(ulong); +ulong __ovld __cnfn convert_ulong_sat_rtz(ulong); +ulong __ovld __cnfn convert_ulong_rtp(ulong); +ulong __ovld __cnfn convert_ulong_sat_rtp(ulong); +ulong __ovld __cnfn convert_ulong_rtn(ulong); +ulong __ovld __cnfn convert_ulong_sat_rtn(ulong); +ulong __ovld __cnfn convert_ulong(ulong); +ulong __ovld __cnfn convert_ulong_sat(ulong); +ulong __ovld __cnfn convert_ulong_rte(float); +ulong __ovld __cnfn convert_ulong_sat_rte(float); +ulong __ovld __cnfn convert_ulong_rtz(float); +ulong __ovld __cnfn convert_ulong_sat_rtz(float); +ulong __ovld __cnfn convert_ulong_rtp(float); +ulong __ovld __cnfn convert_ulong_sat_rtp(float); +ulong __ovld __cnfn convert_ulong_rtn(float); +ulong __ovld __cnfn convert_ulong_sat_rtn(float); +ulong __ovld __cnfn convert_ulong(float); +ulong __ovld __cnfn convert_ulong_sat(float); +float __ovld __cnfn convert_float_rte(char); +float __ovld __cnfn convert_float_rtz(char); +float __ovld __cnfn convert_float_rtp(char); +float __ovld __cnfn convert_float_rtn(char); +float __ovld __cnfn convert_float(char); +float __ovld __cnfn convert_float_rte(uchar); +float __ovld __cnfn convert_float_rtz(uchar); +float __ovld __cnfn convert_float_rtp(uchar); +float __ovld __cnfn convert_float_rtn(uchar); +float __ovld __cnfn convert_float(uchar); +float __ovld __cnfn convert_float_rte(short); +float __ovld __cnfn convert_float_rtz(short); +float __ovld __cnfn convert_float_rtp(short); +float __ovld __cnfn convert_float_rtn(short); +float __ovld __cnfn convert_float(short); +float __ovld __cnfn convert_float_rte(ushort); +float __ovld __cnfn convert_float_rtz(ushort); +float __ovld __cnfn convert_float_rtp(ushort); +float __ovld __cnfn convert_float_rtn(ushort); +float __ovld __cnfn convert_float(ushort); +float __ovld __cnfn convert_float_rte(int); +float __ovld __cnfn convert_float_rtz(int); +float __ovld __cnfn convert_float_rtp(int); +float __ovld __cnfn convert_float_rtn(int); +float 
__ovld __cnfn convert_float(int); +float __ovld __cnfn convert_float_rte(uint); +float __ovld __cnfn convert_float_rtz(uint); +float __ovld __cnfn convert_float_rtp(uint); +float __ovld __cnfn convert_float_rtn(uint); +float __ovld __cnfn convert_float(uint); +float __ovld __cnfn convert_float_rte(long); +float __ovld __cnfn convert_float_rtz(long); +float __ovld __cnfn convert_float_rtp(long); +float __ovld __cnfn convert_float_rtn(long); +float __ovld __cnfn convert_float(long); +float __ovld __cnfn convert_float_rte(ulong); +float __ovld __cnfn convert_float_rtz(ulong); +float __ovld __cnfn convert_float_rtp(ulong); +float __ovld __cnfn convert_float_rtn(ulong); +float __ovld __cnfn convert_float(ulong); +float __ovld __cnfn convert_float_rte(float); +float __ovld __cnfn convert_float_rtz(float); +float __ovld __cnfn convert_float_rtp(float); +float __ovld __cnfn convert_float_rtn(float); +float __ovld __cnfn convert_float(float); +char2 __ovld __cnfn convert_char2_rte(char2); +char2 __ovld __cnfn convert_char2_sat_rte(char2); +char2 __ovld __cnfn convert_char2_rtz(char2); +char2 __ovld __cnfn convert_char2_sat_rtz(char2); +char2 __ovld __cnfn convert_char2_rtp(char2); +char2 __ovld __cnfn convert_char2_sat_rtp(char2); +char2 __ovld __cnfn convert_char2_rtn(char2); +char2 __ovld __cnfn convert_char2_sat_rtn(char2); +char2 __ovld __cnfn convert_char2(char2); +char2 __ovld __cnfn convert_char2_sat(char2); +char2 __ovld __cnfn convert_char2_rte(uchar2); +char2 __ovld __cnfn convert_char2_sat_rte(uchar2); +char2 __ovld __cnfn convert_char2_rtz(uchar2); +char2 __ovld __cnfn convert_char2_sat_rtz(uchar2); +char2 __ovld __cnfn convert_char2_rtp(uchar2); +char2 __ovld __cnfn convert_char2_sat_rtp(uchar2); +char2 __ovld __cnfn convert_char2_rtn(uchar2); +char2 __ovld __cnfn convert_char2_sat_rtn(uchar2); +char2 __ovld __cnfn convert_char2(uchar2); +char2 __ovld __cnfn convert_char2_sat(uchar2); +char2 __ovld __cnfn convert_char2_rte(short2); +char2 __ovld __cnfn convert_char2_sat_rte(short2); +char2 __ovld __cnfn convert_char2_rtz(short2); +char2 __ovld __cnfn convert_char2_sat_rtz(short2); +char2 __ovld __cnfn convert_char2_rtp(short2); +char2 __ovld __cnfn convert_char2_sat_rtp(short2); +char2 __ovld __cnfn convert_char2_rtn(short2); +char2 __ovld __cnfn convert_char2_sat_rtn(short2); +char2 __ovld __cnfn convert_char2(short2); +char2 __ovld __cnfn convert_char2_sat(short2); +char2 __ovld __cnfn convert_char2_rte(ushort2); +char2 __ovld __cnfn convert_char2_sat_rte(ushort2); +char2 __ovld __cnfn convert_char2_rtz(ushort2); +char2 __ovld __cnfn convert_char2_sat_rtz(ushort2); +char2 __ovld __cnfn convert_char2_rtp(ushort2); +char2 __ovld __cnfn convert_char2_sat_rtp(ushort2); +char2 __ovld __cnfn convert_char2_rtn(ushort2); +char2 __ovld __cnfn convert_char2_sat_rtn(ushort2); +char2 __ovld __cnfn convert_char2(ushort2); +char2 __ovld __cnfn convert_char2_sat(ushort2); +char2 __ovld __cnfn convert_char2_rte(int2); +char2 __ovld __cnfn convert_char2_sat_rte(int2); +char2 __ovld __cnfn convert_char2_rtz(int2); +char2 __ovld __cnfn convert_char2_sat_rtz(int2); +char2 __ovld __cnfn convert_char2_rtp(int2); +char2 __ovld __cnfn convert_char2_sat_rtp(int2); +char2 __ovld __cnfn convert_char2_rtn(int2); +char2 __ovld __cnfn convert_char2_sat_rtn(int2); +char2 __ovld __cnfn convert_char2(int2); +char2 __ovld __cnfn convert_char2_sat(int2); +char2 __ovld __cnfn convert_char2_rte(uint2); +char2 __ovld __cnfn convert_char2_sat_rte(uint2); +char2 __ovld __cnfn convert_char2_rtz(uint2); +char2 __ovld __cnfn 
convert_char2_sat_rtz(uint2); +char2 __ovld __cnfn convert_char2_rtp(uint2); +char2 __ovld __cnfn convert_char2_sat_rtp(uint2); +char2 __ovld __cnfn convert_char2_rtn(uint2); +char2 __ovld __cnfn convert_char2_sat_rtn(uint2); +char2 __ovld __cnfn convert_char2(uint2); +char2 __ovld __cnfn convert_char2_sat(uint2); +char2 __ovld __cnfn convert_char2_rte(long2); +char2 __ovld __cnfn convert_char2_sat_rte(long2); +char2 __ovld __cnfn convert_char2_rtz(long2); +char2 __ovld __cnfn convert_char2_sat_rtz(long2); +char2 __ovld __cnfn convert_char2_rtp(long2); +char2 __ovld __cnfn convert_char2_sat_rtp(long2); +char2 __ovld __cnfn convert_char2_rtn(long2); +char2 __ovld __cnfn convert_char2_sat_rtn(long2); +char2 __ovld __cnfn convert_char2(long2); +char2 __ovld __cnfn convert_char2_sat(long2); +char2 __ovld __cnfn convert_char2_rte(ulong2); +char2 __ovld __cnfn convert_char2_sat_rte(ulong2); +char2 __ovld __cnfn convert_char2_rtz(ulong2); +char2 __ovld __cnfn convert_char2_sat_rtz(ulong2); +char2 __ovld __cnfn convert_char2_rtp(ulong2); +char2 __ovld __cnfn convert_char2_sat_rtp(ulong2); +char2 __ovld __cnfn convert_char2_rtn(ulong2); +char2 __ovld __cnfn convert_char2_sat_rtn(ulong2); +char2 __ovld __cnfn convert_char2(ulong2); +char2 __ovld __cnfn convert_char2_sat(ulong2); +char2 __ovld __cnfn convert_char2_rte(float2); +char2 __ovld __cnfn convert_char2_sat_rte(float2); +char2 __ovld __cnfn convert_char2_rtz(float2); +char2 __ovld __cnfn convert_char2_sat_rtz(float2); +char2 __ovld __cnfn convert_char2_rtp(float2); +char2 __ovld __cnfn convert_char2_sat_rtp(float2); +char2 __ovld __cnfn convert_char2_rtn(float2); +char2 __ovld __cnfn convert_char2_sat_rtn(float2); +char2 __ovld __cnfn convert_char2(float2); +char2 __ovld __cnfn convert_char2_sat(float2); +uchar2 __ovld __cnfn convert_uchar2_rte(char2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2); +uchar2 __ovld __cnfn convert_uchar2_rtz(char2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2); +uchar2 __ovld __cnfn convert_uchar2_rtp(char2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2); +uchar2 __ovld __cnfn convert_uchar2_rtn(char2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2); +uchar2 __ovld __cnfn convert_uchar2(char2); +uchar2 __ovld __cnfn convert_uchar2_sat(char2); +uchar2 __ovld __cnfn convert_uchar2_rte(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(uchar2); +uchar2 __ovld __cnfn convert_uchar2_rtz(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uchar2); +uchar2 __ovld __cnfn convert_uchar2_rtp(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uchar2); +uchar2 __ovld __cnfn convert_uchar2_rtn(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uchar2); +uchar2 __ovld __cnfn convert_uchar2(uchar2); +uchar2 __ovld __cnfn convert_uchar2_sat(uchar2); +uchar2 __ovld __cnfn convert_uchar2_rte(short2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(short2); +uchar2 __ovld __cnfn convert_uchar2_rtz(short2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(short2); +uchar2 __ovld __cnfn convert_uchar2_rtp(short2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(short2); +uchar2 __ovld __cnfn convert_uchar2_rtn(short2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(short2); +uchar2 __ovld __cnfn convert_uchar2(short2); +uchar2 __ovld __cnfn convert_uchar2_sat(short2); +uchar2 __ovld __cnfn convert_uchar2_rte(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(ushort2); +uchar2 __ovld __cnfn convert_uchar2_rtz(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ushort2); +uchar2 __ovld __cnfn 
convert_uchar2_rtp(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ushort2); +uchar2 __ovld __cnfn convert_uchar2_rtn(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ushort2); +uchar2 __ovld __cnfn convert_uchar2(ushort2); +uchar2 __ovld __cnfn convert_uchar2_sat(ushort2); +uchar2 __ovld __cnfn convert_uchar2_rte(int2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(int2); +uchar2 __ovld __cnfn convert_uchar2_rtz(int2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(int2); +uchar2 __ovld __cnfn convert_uchar2_rtp(int2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(int2); +uchar2 __ovld __cnfn convert_uchar2_rtn(int2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(int2); +uchar2 __ovld __cnfn convert_uchar2(int2); +uchar2 __ovld __cnfn convert_uchar2_sat(int2); +uchar2 __ovld __cnfn convert_uchar2_rte(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(uint2); +uchar2 __ovld __cnfn convert_uchar2_rtz(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(uint2); +uchar2 __ovld __cnfn convert_uchar2_rtp(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(uint2); +uchar2 __ovld __cnfn convert_uchar2_rtn(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(uint2); +uchar2 __ovld __cnfn convert_uchar2(uint2); +uchar2 __ovld __cnfn convert_uchar2_sat(uint2); +uchar2 __ovld __cnfn convert_uchar2_rte(long2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(long2); +uchar2 __ovld __cnfn convert_uchar2_rtz(long2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(long2); +uchar2 __ovld __cnfn convert_uchar2_rtp(long2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(long2); +uchar2 __ovld __cnfn convert_uchar2_rtn(long2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(long2); +uchar2 __ovld __cnfn convert_uchar2(long2); +uchar2 __ovld __cnfn convert_uchar2_sat(long2); +uchar2 __ovld __cnfn convert_uchar2_rte(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(ulong2); +uchar2 __ovld __cnfn convert_uchar2_rtz(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(ulong2); +uchar2 __ovld __cnfn convert_uchar2_rtp(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(ulong2); +uchar2 __ovld __cnfn convert_uchar2_rtn(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(ulong2); +uchar2 __ovld __cnfn convert_uchar2(ulong2); +uchar2 __ovld __cnfn convert_uchar2_sat(ulong2); +uchar2 __ovld __cnfn convert_uchar2_rte(float2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(float2); +uchar2 __ovld __cnfn convert_uchar2_rtz(float2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(float2); +uchar2 __ovld __cnfn convert_uchar2_rtp(float2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(float2); +uchar2 __ovld __cnfn convert_uchar2_rtn(float2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(float2); +uchar2 __ovld __cnfn convert_uchar2(float2); +uchar2 __ovld __cnfn convert_uchar2_sat(float2); +short2 __ovld __cnfn convert_short2_rte(char2); +short2 __ovld __cnfn convert_short2_sat_rte(char2); +short2 __ovld __cnfn convert_short2_rtz(char2); +short2 __ovld __cnfn convert_short2_sat_rtz(char2); +short2 __ovld __cnfn convert_short2_rtp(char2); +short2 __ovld __cnfn convert_short2_sat_rtp(char2); +short2 __ovld __cnfn convert_short2_rtn(char2); +short2 __ovld __cnfn convert_short2_sat_rtn(char2); +short2 __ovld __cnfn convert_short2(char2); +short2 __ovld __cnfn convert_short2_sat(char2); +short2 __ovld __cnfn convert_short2_rte(uchar2); +short2 __ovld __cnfn convert_short2_sat_rte(uchar2); +short2 __ovld __cnfn convert_short2_rtz(uchar2); +short2 __ovld __cnfn convert_short2_sat_rtz(uchar2); +short2 __ovld __cnfn 
convert_short2_rtp(uchar2); +short2 __ovld __cnfn convert_short2_sat_rtp(uchar2); +short2 __ovld __cnfn convert_short2_rtn(uchar2); +short2 __ovld __cnfn convert_short2_sat_rtn(uchar2); +short2 __ovld __cnfn convert_short2(uchar2); +short2 __ovld __cnfn convert_short2_sat(uchar2); +short2 __ovld __cnfn convert_short2_rte(short2); +short2 __ovld __cnfn convert_short2_sat_rte(short2); +short2 __ovld __cnfn convert_short2_rtz(short2); +short2 __ovld __cnfn convert_short2_sat_rtz(short2); +short2 __ovld __cnfn convert_short2_rtp(short2); +short2 __ovld __cnfn convert_short2_sat_rtp(short2); +short2 __ovld __cnfn convert_short2_rtn(short2); +short2 __ovld __cnfn convert_short2_sat_rtn(short2); +short2 __ovld __cnfn convert_short2(short2); +short2 __ovld __cnfn convert_short2_sat(short2); +short2 __ovld __cnfn convert_short2_rte(ushort2); +short2 __ovld __cnfn convert_short2_sat_rte(ushort2); +short2 __ovld __cnfn convert_short2_rtz(ushort2); +short2 __ovld __cnfn convert_short2_sat_rtz(ushort2); +short2 __ovld __cnfn convert_short2_rtp(ushort2); +short2 __ovld __cnfn convert_short2_sat_rtp(ushort2); +short2 __ovld __cnfn convert_short2_rtn(ushort2); +short2 __ovld __cnfn convert_short2_sat_rtn(ushort2); +short2 __ovld __cnfn convert_short2(ushort2); +short2 __ovld __cnfn convert_short2_sat(ushort2); +short2 __ovld __cnfn convert_short2_rte(int2); +short2 __ovld __cnfn convert_short2_sat_rte(int2); +short2 __ovld __cnfn convert_short2_rtz(int2); +short2 __ovld __cnfn convert_short2_sat_rtz(int2); +short2 __ovld __cnfn convert_short2_rtp(int2); +short2 __ovld __cnfn convert_short2_sat_rtp(int2); +short2 __ovld __cnfn convert_short2_rtn(int2); +short2 __ovld __cnfn convert_short2_sat_rtn(int2); +short2 __ovld __cnfn convert_short2(int2); +short2 __ovld __cnfn convert_short2_sat(int2); +short2 __ovld __cnfn convert_short2_rte(uint2); +short2 __ovld __cnfn convert_short2_sat_rte(uint2); +short2 __ovld __cnfn convert_short2_rtz(uint2); +short2 __ovld __cnfn convert_short2_sat_rtz(uint2); +short2 __ovld __cnfn convert_short2_rtp(uint2); +short2 __ovld __cnfn convert_short2_sat_rtp(uint2); +short2 __ovld __cnfn convert_short2_rtn(uint2); +short2 __ovld __cnfn convert_short2_sat_rtn(uint2); +short2 __ovld __cnfn convert_short2(uint2); +short2 __ovld __cnfn convert_short2_sat(uint2); +short2 __ovld __cnfn convert_short2_rte(long2); +short2 __ovld __cnfn convert_short2_sat_rte(long2); +short2 __ovld __cnfn convert_short2_rtz(long2); +short2 __ovld __cnfn convert_short2_sat_rtz(long2); +short2 __ovld __cnfn convert_short2_rtp(long2); +short2 __ovld __cnfn convert_short2_sat_rtp(long2); +short2 __ovld __cnfn convert_short2_rtn(long2); +short2 __ovld __cnfn convert_short2_sat_rtn(long2); +short2 __ovld __cnfn convert_short2(long2); +short2 __ovld __cnfn convert_short2_sat(long2); +short2 __ovld __cnfn convert_short2_rte(ulong2); +short2 __ovld __cnfn convert_short2_sat_rte(ulong2); +short2 __ovld __cnfn convert_short2_rtz(ulong2); +short2 __ovld __cnfn convert_short2_sat_rtz(ulong2); +short2 __ovld __cnfn convert_short2_rtp(ulong2); +short2 __ovld __cnfn convert_short2_sat_rtp(ulong2); +short2 __ovld __cnfn convert_short2_rtn(ulong2); +short2 __ovld __cnfn convert_short2_sat_rtn(ulong2); +short2 __ovld __cnfn convert_short2(ulong2); +short2 __ovld __cnfn convert_short2_sat(ulong2); +short2 __ovld __cnfn convert_short2_rte(float2); +short2 __ovld __cnfn convert_short2_sat_rte(float2); +short2 __ovld __cnfn convert_short2_rtz(float2); +short2 __ovld __cnfn convert_short2_sat_rtz(float2); +short2 __ovld __cnfn 
convert_short2_rtp(float2); +short2 __ovld __cnfn convert_short2_sat_rtp(float2); +short2 __ovld __cnfn convert_short2_rtn(float2); +short2 __ovld __cnfn convert_short2_sat_rtn(float2); +short2 __ovld __cnfn convert_short2(float2); +short2 __ovld __cnfn convert_short2_sat(float2); +ushort2 __ovld __cnfn convert_ushort2_rte(char2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2); +ushort2 __ovld __cnfn convert_ushort2_rtz(char2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2); +ushort2 __ovld __cnfn convert_ushort2_rtp(char2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2); +ushort2 __ovld __cnfn convert_ushort2_rtn(char2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2); +ushort2 __ovld __cnfn convert_ushort2(char2); +ushort2 __ovld __cnfn convert_ushort2_sat(char2); +ushort2 __ovld __cnfn convert_ushort2_rte(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(uchar2); +ushort2 __ovld __cnfn convert_ushort2_rtz(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uchar2); +ushort2 __ovld __cnfn convert_ushort2_rtp(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uchar2); +ushort2 __ovld __cnfn convert_ushort2_rtn(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uchar2); +ushort2 __ovld __cnfn convert_ushort2(uchar2); +ushort2 __ovld __cnfn convert_ushort2_sat(uchar2); +ushort2 __ovld __cnfn convert_ushort2_rte(short2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(short2); +ushort2 __ovld __cnfn convert_ushort2_rtz(short2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(short2); +ushort2 __ovld __cnfn convert_ushort2_rtp(short2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(short2); +ushort2 __ovld __cnfn convert_ushort2_rtn(short2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(short2); +ushort2 __ovld __cnfn convert_ushort2(short2); +ushort2 __ovld __cnfn convert_ushort2_sat(short2); +ushort2 __ovld __cnfn convert_ushort2_rte(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(ushort2); +ushort2 __ovld __cnfn convert_ushort2_rtz(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ushort2); +ushort2 __ovld __cnfn convert_ushort2_rtp(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ushort2); +ushort2 __ovld __cnfn convert_ushort2_rtn(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ushort2); +ushort2 __ovld __cnfn convert_ushort2(ushort2); +ushort2 __ovld __cnfn convert_ushort2_sat(ushort2); +ushort2 __ovld __cnfn convert_ushort2_rte(int2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(int2); +ushort2 __ovld __cnfn convert_ushort2_rtz(int2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(int2); +ushort2 __ovld __cnfn convert_ushort2_rtp(int2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(int2); +ushort2 __ovld __cnfn convert_ushort2_rtn(int2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(int2); +ushort2 __ovld __cnfn convert_ushort2(int2); +ushort2 __ovld __cnfn convert_ushort2_sat(int2); +ushort2 __ovld __cnfn convert_ushort2_rte(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(uint2); +ushort2 __ovld __cnfn convert_ushort2_rtz(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(uint2); +ushort2 __ovld __cnfn convert_ushort2_rtp(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(uint2); +ushort2 __ovld __cnfn convert_ushort2_rtn(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(uint2); +ushort2 __ovld __cnfn convert_ushort2(uint2); +ushort2 __ovld __cnfn convert_ushort2_sat(uint2); +ushort2 __ovld __cnfn convert_ushort2_rte(long2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(long2); 
+ushort2 __ovld __cnfn convert_ushort2_rtz(long2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(long2); +ushort2 __ovld __cnfn convert_ushort2_rtp(long2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(long2); +ushort2 __ovld __cnfn convert_ushort2_rtn(long2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(long2); +ushort2 __ovld __cnfn convert_ushort2(long2); +ushort2 __ovld __cnfn convert_ushort2_sat(long2); +ushort2 __ovld __cnfn convert_ushort2_rte(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(ulong2); +ushort2 __ovld __cnfn convert_ushort2_rtz(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(ulong2); +ushort2 __ovld __cnfn convert_ushort2_rtp(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(ulong2); +ushort2 __ovld __cnfn convert_ushort2_rtn(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(ulong2); +ushort2 __ovld __cnfn convert_ushort2(ulong2); +ushort2 __ovld __cnfn convert_ushort2_sat(ulong2); +ushort2 __ovld __cnfn convert_ushort2_rte(float2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(float2); +ushort2 __ovld __cnfn convert_ushort2_rtz(float2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(float2); +ushort2 __ovld __cnfn convert_ushort2_rtp(float2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(float2); +ushort2 __ovld __cnfn convert_ushort2_rtn(float2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(float2); +ushort2 __ovld __cnfn convert_ushort2(float2); +ushort2 __ovld __cnfn convert_ushort2_sat(float2); +int2 __ovld __cnfn convert_int2_rte(char2); +int2 __ovld __cnfn convert_int2_sat_rte(char2); +int2 __ovld __cnfn convert_int2_rtz(char2); +int2 __ovld __cnfn convert_int2_sat_rtz(char2); +int2 __ovld __cnfn convert_int2_rtp(char2); +int2 __ovld __cnfn convert_int2_sat_rtp(char2); +int2 __ovld __cnfn convert_int2_rtn(char2); +int2 __ovld __cnfn convert_int2_sat_rtn(char2); +int2 __ovld __cnfn convert_int2(char2); +int2 __ovld __cnfn convert_int2_sat(char2); +int2 __ovld __cnfn convert_int2_rte(uchar2); +int2 __ovld __cnfn convert_int2_sat_rte(uchar2); +int2 __ovld __cnfn convert_int2_rtz(uchar2); +int2 __ovld __cnfn convert_int2_sat_rtz(uchar2); +int2 __ovld __cnfn convert_int2_rtp(uchar2); +int2 __ovld __cnfn convert_int2_sat_rtp(uchar2); +int2 __ovld __cnfn convert_int2_rtn(uchar2); +int2 __ovld __cnfn convert_int2_sat_rtn(uchar2); +int2 __ovld __cnfn convert_int2(uchar2); +int2 __ovld __cnfn convert_int2_sat(uchar2); +int2 __ovld __cnfn convert_int2_rte(short2); +int2 __ovld __cnfn convert_int2_sat_rte(short2); +int2 __ovld __cnfn convert_int2_rtz(short2); +int2 __ovld __cnfn convert_int2_sat_rtz(short2); +int2 __ovld __cnfn convert_int2_rtp(short2); +int2 __ovld __cnfn convert_int2_sat_rtp(short2); +int2 __ovld __cnfn convert_int2_rtn(short2); +int2 __ovld __cnfn convert_int2_sat_rtn(short2); +int2 __ovld __cnfn convert_int2(short2); +int2 __ovld __cnfn convert_int2_sat(short2); +int2 __ovld __cnfn convert_int2_rte(ushort2); +int2 __ovld __cnfn convert_int2_sat_rte(ushort2); +int2 __ovld __cnfn convert_int2_rtz(ushort2); +int2 __ovld __cnfn convert_int2_sat_rtz(ushort2); +int2 __ovld __cnfn convert_int2_rtp(ushort2); +int2 __ovld __cnfn convert_int2_sat_rtp(ushort2); +int2 __ovld __cnfn convert_int2_rtn(ushort2); +int2 __ovld __cnfn convert_int2_sat_rtn(ushort2); +int2 __ovld __cnfn convert_int2(ushort2); +int2 __ovld __cnfn convert_int2_sat(ushort2); +int2 __ovld __cnfn convert_int2_rte(int2); +int2 __ovld __cnfn convert_int2_sat_rte(int2); +int2 __ovld __cnfn convert_int2_rtz(int2); +int2 __ovld __cnfn convert_int2_sat_rtz(int2); 
+int2 __ovld __cnfn convert_int2_rtp(int2); +int2 __ovld __cnfn convert_int2_sat_rtp(int2); +int2 __ovld __cnfn convert_int2_rtn(int2); +int2 __ovld __cnfn convert_int2_sat_rtn(int2); +int2 __ovld __cnfn convert_int2(int2); +int2 __ovld __cnfn convert_int2_sat(int2); +int2 __ovld __cnfn convert_int2_rte(uint2); +int2 __ovld __cnfn convert_int2_sat_rte(uint2); +int2 __ovld __cnfn convert_int2_rtz(uint2); +int2 __ovld __cnfn convert_int2_sat_rtz(uint2); +int2 __ovld __cnfn convert_int2_rtp(uint2); +int2 __ovld __cnfn convert_int2_sat_rtp(uint2); +int2 __ovld __cnfn convert_int2_rtn(uint2); +int2 __ovld __cnfn convert_int2_sat_rtn(uint2); +int2 __ovld __cnfn convert_int2(uint2); +int2 __ovld __cnfn convert_int2_sat(uint2); +int2 __ovld __cnfn convert_int2_rte(long2); +int2 __ovld __cnfn convert_int2_sat_rte(long2); +int2 __ovld __cnfn convert_int2_rtz(long2); +int2 __ovld __cnfn convert_int2_sat_rtz(long2); +int2 __ovld __cnfn convert_int2_rtp(long2); +int2 __ovld __cnfn convert_int2_sat_rtp(long2); +int2 __ovld __cnfn convert_int2_rtn(long2); +int2 __ovld __cnfn convert_int2_sat_rtn(long2); +int2 __ovld __cnfn convert_int2(long2); +int2 __ovld __cnfn convert_int2_sat(long2); +int2 __ovld __cnfn convert_int2_rte(ulong2); +int2 __ovld __cnfn convert_int2_sat_rte(ulong2); +int2 __ovld __cnfn convert_int2_rtz(ulong2); +int2 __ovld __cnfn convert_int2_sat_rtz(ulong2); +int2 __ovld __cnfn convert_int2_rtp(ulong2); +int2 __ovld __cnfn convert_int2_sat_rtp(ulong2); +int2 __ovld __cnfn convert_int2_rtn(ulong2); +int2 __ovld __cnfn convert_int2_sat_rtn(ulong2); +int2 __ovld __cnfn convert_int2(ulong2); +int2 __ovld __cnfn convert_int2_sat(ulong2); +int2 __ovld __cnfn convert_int2_rte(float2); +int2 __ovld __cnfn convert_int2_sat_rte(float2); +int2 __ovld __cnfn convert_int2_rtz(float2); +int2 __ovld __cnfn convert_int2_sat_rtz(float2); +int2 __ovld __cnfn convert_int2_rtp(float2); +int2 __ovld __cnfn convert_int2_sat_rtp(float2); +int2 __ovld __cnfn convert_int2_rtn(float2); +int2 __ovld __cnfn convert_int2_sat_rtn(float2); +int2 __ovld __cnfn convert_int2(float2); +int2 __ovld __cnfn convert_int2_sat(float2); +uint2 __ovld __cnfn convert_uint2_rte(char2); +uint2 __ovld __cnfn convert_uint2_sat_rte(char2); +uint2 __ovld __cnfn convert_uint2_rtz(char2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(char2); +uint2 __ovld __cnfn convert_uint2_rtp(char2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(char2); +uint2 __ovld __cnfn convert_uint2_rtn(char2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(char2); +uint2 __ovld __cnfn convert_uint2(char2); +uint2 __ovld __cnfn convert_uint2_sat(char2); +uint2 __ovld __cnfn convert_uint2_rte(uchar2); +uint2 __ovld __cnfn convert_uint2_sat_rte(uchar2); +uint2 __ovld __cnfn convert_uint2_rtz(uchar2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(uchar2); +uint2 __ovld __cnfn convert_uint2_rtp(uchar2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(uchar2); +uint2 __ovld __cnfn convert_uint2_rtn(uchar2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(uchar2); +uint2 __ovld __cnfn convert_uint2(uchar2); +uint2 __ovld __cnfn convert_uint2_sat(uchar2); +uint2 __ovld __cnfn convert_uint2_rte(short2); +uint2 __ovld __cnfn convert_uint2_sat_rte(short2); +uint2 __ovld __cnfn convert_uint2_rtz(short2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(short2); +uint2 __ovld __cnfn convert_uint2_rtp(short2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(short2); +uint2 __ovld __cnfn convert_uint2_rtn(short2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(short2); +uint2 __ovld __cnfn convert_uint2(short2); 
+uint2 __ovld __cnfn convert_uint2_sat(short2); +uint2 __ovld __cnfn convert_uint2_rte(ushort2); +uint2 __ovld __cnfn convert_uint2_sat_rte(ushort2); +uint2 __ovld __cnfn convert_uint2_rtz(ushort2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(ushort2); +uint2 __ovld __cnfn convert_uint2_rtp(ushort2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(ushort2); +uint2 __ovld __cnfn convert_uint2_rtn(ushort2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(ushort2); +uint2 __ovld __cnfn convert_uint2(ushort2); +uint2 __ovld __cnfn convert_uint2_sat(ushort2); +uint2 __ovld __cnfn convert_uint2_rte(int2); +uint2 __ovld __cnfn convert_uint2_sat_rte(int2); +uint2 __ovld __cnfn convert_uint2_rtz(int2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(int2); +uint2 __ovld __cnfn convert_uint2_rtp(int2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(int2); +uint2 __ovld __cnfn convert_uint2_rtn(int2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(int2); +uint2 __ovld __cnfn convert_uint2(int2); +uint2 __ovld __cnfn convert_uint2_sat(int2); +uint2 __ovld __cnfn convert_uint2_rte(uint2); +uint2 __ovld __cnfn convert_uint2_sat_rte(uint2); +uint2 __ovld __cnfn convert_uint2_rtz(uint2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(uint2); +uint2 __ovld __cnfn convert_uint2_rtp(uint2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(uint2); +uint2 __ovld __cnfn convert_uint2_rtn(uint2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(uint2); +uint2 __ovld __cnfn convert_uint2(uint2); +uint2 __ovld __cnfn convert_uint2_sat(uint2); +uint2 __ovld __cnfn convert_uint2_rte(long2); +uint2 __ovld __cnfn convert_uint2_sat_rte(long2); +uint2 __ovld __cnfn convert_uint2_rtz(long2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(long2); +uint2 __ovld __cnfn convert_uint2_rtp(long2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(long2); +uint2 __ovld __cnfn convert_uint2_rtn(long2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(long2); +uint2 __ovld __cnfn convert_uint2(long2); +uint2 __ovld __cnfn convert_uint2_sat(long2); +uint2 __ovld __cnfn convert_uint2_rte(ulong2); +uint2 __ovld __cnfn convert_uint2_sat_rte(ulong2); +uint2 __ovld __cnfn convert_uint2_rtz(ulong2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(ulong2); +uint2 __ovld __cnfn convert_uint2_rtp(ulong2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(ulong2); +uint2 __ovld __cnfn convert_uint2_rtn(ulong2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(ulong2); +uint2 __ovld __cnfn convert_uint2(ulong2); +uint2 __ovld __cnfn convert_uint2_sat(ulong2); +uint2 __ovld __cnfn convert_uint2_rte(float2); +uint2 __ovld __cnfn convert_uint2_sat_rte(float2); +uint2 __ovld __cnfn convert_uint2_rtz(float2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(float2); +uint2 __ovld __cnfn convert_uint2_rtp(float2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(float2); +uint2 __ovld __cnfn convert_uint2_rtn(float2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(float2); +uint2 __ovld __cnfn convert_uint2(float2); +uint2 __ovld __cnfn convert_uint2_sat(float2); +long2 __ovld __cnfn convert_long2_rte(char2); +long2 __ovld __cnfn convert_long2_sat_rte(char2); +long2 __ovld __cnfn convert_long2_rtz(char2); +long2 __ovld __cnfn convert_long2_sat_rtz(char2); +long2 __ovld __cnfn convert_long2_rtp(char2); +long2 __ovld __cnfn convert_long2_sat_rtp(char2); +long2 __ovld __cnfn convert_long2_rtn(char2); +long2 __ovld __cnfn convert_long2_sat_rtn(char2); +long2 __ovld __cnfn convert_long2(char2); +long2 __ovld __cnfn convert_long2_sat(char2); +long2 __ovld __cnfn convert_long2_rte(uchar2); +long2 __ovld __cnfn convert_long2_sat_rte(uchar2); 
+long2 __ovld __cnfn convert_long2_rtz(uchar2); +long2 __ovld __cnfn convert_long2_sat_rtz(uchar2); +long2 __ovld __cnfn convert_long2_rtp(uchar2); +long2 __ovld __cnfn convert_long2_sat_rtp(uchar2); +long2 __ovld __cnfn convert_long2_rtn(uchar2); +long2 __ovld __cnfn convert_long2_sat_rtn(uchar2); +long2 __ovld __cnfn convert_long2(uchar2); +long2 __ovld __cnfn convert_long2_sat(uchar2); +long2 __ovld __cnfn convert_long2_rte(short2); +long2 __ovld __cnfn convert_long2_sat_rte(short2); +long2 __ovld __cnfn convert_long2_rtz(short2); +long2 __ovld __cnfn convert_long2_sat_rtz(short2); +long2 __ovld __cnfn convert_long2_rtp(short2); +long2 __ovld __cnfn convert_long2_sat_rtp(short2); +long2 __ovld __cnfn convert_long2_rtn(short2); +long2 __ovld __cnfn convert_long2_sat_rtn(short2); +long2 __ovld __cnfn convert_long2(short2); +long2 __ovld __cnfn convert_long2_sat(short2); +long2 __ovld __cnfn convert_long2_rte(ushort2); +long2 __ovld __cnfn convert_long2_sat_rte(ushort2); +long2 __ovld __cnfn convert_long2_rtz(ushort2); +long2 __ovld __cnfn convert_long2_sat_rtz(ushort2); +long2 __ovld __cnfn convert_long2_rtp(ushort2); +long2 __ovld __cnfn convert_long2_sat_rtp(ushort2); +long2 __ovld __cnfn convert_long2_rtn(ushort2); +long2 __ovld __cnfn convert_long2_sat_rtn(ushort2); +long2 __ovld __cnfn convert_long2(ushort2); +long2 __ovld __cnfn convert_long2_sat(ushort2); +long2 __ovld __cnfn convert_long2_rte(int2); +long2 __ovld __cnfn convert_long2_sat_rte(int2); +long2 __ovld __cnfn convert_long2_rtz(int2); +long2 __ovld __cnfn convert_long2_sat_rtz(int2); +long2 __ovld __cnfn convert_long2_rtp(int2); +long2 __ovld __cnfn convert_long2_sat_rtp(int2); +long2 __ovld __cnfn convert_long2_rtn(int2); +long2 __ovld __cnfn convert_long2_sat_rtn(int2); +long2 __ovld __cnfn convert_long2(int2); +long2 __ovld __cnfn convert_long2_sat(int2); +long2 __ovld __cnfn convert_long2_rte(uint2); +long2 __ovld __cnfn convert_long2_sat_rte(uint2); +long2 __ovld __cnfn convert_long2_rtz(uint2); +long2 __ovld __cnfn convert_long2_sat_rtz(uint2); +long2 __ovld __cnfn convert_long2_rtp(uint2); +long2 __ovld __cnfn convert_long2_sat_rtp(uint2); +long2 __ovld __cnfn convert_long2_rtn(uint2); +long2 __ovld __cnfn convert_long2_sat_rtn(uint2); +long2 __ovld __cnfn convert_long2(uint2); +long2 __ovld __cnfn convert_long2_sat(uint2); +long2 __ovld __cnfn convert_long2_rte(long2); +long2 __ovld __cnfn convert_long2_sat_rte(long2); +long2 __ovld __cnfn convert_long2_rtz(long2); +long2 __ovld __cnfn convert_long2_sat_rtz(long2); +long2 __ovld __cnfn convert_long2_rtp(long2); +long2 __ovld __cnfn convert_long2_sat_rtp(long2); +long2 __ovld __cnfn convert_long2_rtn(long2); +long2 __ovld __cnfn convert_long2_sat_rtn(long2); +long2 __ovld __cnfn convert_long2(long2); +long2 __ovld __cnfn convert_long2_sat(long2); +long2 __ovld __cnfn convert_long2_rte(ulong2); +long2 __ovld __cnfn convert_long2_sat_rte(ulong2); +long2 __ovld __cnfn convert_long2_rtz(ulong2); +long2 __ovld __cnfn convert_long2_sat_rtz(ulong2); +long2 __ovld __cnfn convert_long2_rtp(ulong2); +long2 __ovld __cnfn convert_long2_sat_rtp(ulong2); +long2 __ovld __cnfn convert_long2_rtn(ulong2); +long2 __ovld __cnfn convert_long2_sat_rtn(ulong2); +long2 __ovld __cnfn convert_long2(ulong2); +long2 __ovld __cnfn convert_long2_sat(ulong2); +long2 __ovld __cnfn convert_long2_rte(float2); +long2 __ovld __cnfn convert_long2_sat_rte(float2); +long2 __ovld __cnfn convert_long2_rtz(float2); +long2 __ovld __cnfn convert_long2_sat_rtz(float2); +long2 __ovld __cnfn 
convert_long2_rtp(float2); +long2 __ovld __cnfn convert_long2_sat_rtp(float2); +long2 __ovld __cnfn convert_long2_rtn(float2); +long2 __ovld __cnfn convert_long2_sat_rtn(float2); +long2 __ovld __cnfn convert_long2(float2); +long2 __ovld __cnfn convert_long2_sat(float2); +ulong2 __ovld __cnfn convert_ulong2_rte(char2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2); +ulong2 __ovld __cnfn convert_ulong2_rtz(char2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2); +ulong2 __ovld __cnfn convert_ulong2_rtp(char2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2); +ulong2 __ovld __cnfn convert_ulong2_rtn(char2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2); +ulong2 __ovld __cnfn convert_ulong2(char2); +ulong2 __ovld __cnfn convert_ulong2_sat(char2); +ulong2 __ovld __cnfn convert_ulong2_rte(uchar2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(uchar2); +ulong2 __ovld __cnfn convert_ulong2_rtz(uchar2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uchar2); +ulong2 __ovld __cnfn convert_ulong2_rtp(uchar2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uchar2); +ulong2 __ovld __cnfn convert_ulong2_rtn(uchar2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uchar2); +ulong2 __ovld __cnfn convert_ulong2(uchar2); +ulong2 __ovld __cnfn convert_ulong2_sat(uchar2); +ulong2 __ovld __cnfn convert_ulong2_rte(short2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(short2); +ulong2 __ovld __cnfn convert_ulong2_rtz(short2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(short2); +ulong2 __ovld __cnfn convert_ulong2_rtp(short2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(short2); +ulong2 __ovld __cnfn convert_ulong2_rtn(short2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(short2); +ulong2 __ovld __cnfn convert_ulong2(short2); +ulong2 __ovld __cnfn convert_ulong2_sat(short2); +ulong2 __ovld __cnfn convert_ulong2_rte(ushort2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(ushort2); +ulong2 __ovld __cnfn convert_ulong2_rtz(ushort2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ushort2); +ulong2 __ovld __cnfn convert_ulong2_rtp(ushort2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ushort2); +ulong2 __ovld __cnfn convert_ulong2_rtn(ushort2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ushort2); +ulong2 __ovld __cnfn convert_ulong2(ushort2); +ulong2 __ovld __cnfn convert_ulong2_sat(ushort2); +ulong2 __ovld __cnfn convert_ulong2_rte(int2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(int2); +ulong2 __ovld __cnfn convert_ulong2_rtz(int2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(int2); +ulong2 __ovld __cnfn convert_ulong2_rtp(int2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(int2); +ulong2 __ovld __cnfn convert_ulong2_rtn(int2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(int2); +ulong2 __ovld __cnfn convert_ulong2(int2); +ulong2 __ovld __cnfn convert_ulong2_sat(int2); +ulong2 __ovld __cnfn convert_ulong2_rte(uint2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(uint2); +ulong2 __ovld __cnfn convert_ulong2_rtz(uint2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(uint2); +ulong2 __ovld __cnfn convert_ulong2_rtp(uint2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(uint2); +ulong2 __ovld __cnfn convert_ulong2_rtn(uint2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(uint2); +ulong2 __ovld __cnfn convert_ulong2(uint2); +ulong2 __ovld __cnfn convert_ulong2_sat(uint2); +ulong2 __ovld __cnfn convert_ulong2_rte(long2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(long2); +ulong2 __ovld __cnfn convert_ulong2_rtz(long2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(long2); +ulong2 __ovld __cnfn 
convert_ulong2_rtp(long2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(long2); +ulong2 __ovld __cnfn convert_ulong2_rtn(long2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(long2); +ulong2 __ovld __cnfn convert_ulong2(long2); +ulong2 __ovld __cnfn convert_ulong2_sat(long2); +ulong2 __ovld __cnfn convert_ulong2_rte(ulong2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(ulong2); +ulong2 __ovld __cnfn convert_ulong2_rtz(ulong2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(ulong2); +ulong2 __ovld __cnfn convert_ulong2_rtp(ulong2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(ulong2); +ulong2 __ovld __cnfn convert_ulong2_rtn(ulong2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(ulong2); +ulong2 __ovld __cnfn convert_ulong2(ulong2); +ulong2 __ovld __cnfn convert_ulong2_sat(ulong2); +ulong2 __ovld __cnfn convert_ulong2_rte(float2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(float2); +ulong2 __ovld __cnfn convert_ulong2_rtz(float2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(float2); +ulong2 __ovld __cnfn convert_ulong2_rtp(float2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(float2); +ulong2 __ovld __cnfn convert_ulong2_rtn(float2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(float2); +ulong2 __ovld __cnfn convert_ulong2(float2); +ulong2 __ovld __cnfn convert_ulong2_sat(float2); +float2 __ovld __cnfn convert_float2_rte(char2); +float2 __ovld __cnfn convert_float2_rtz(char2); +float2 __ovld __cnfn convert_float2_rtp(char2); +float2 __ovld __cnfn convert_float2_rtn(char2); +float2 __ovld __cnfn convert_float2(char2); +float2 __ovld __cnfn convert_float2_rte(uchar2); +float2 __ovld __cnfn convert_float2_rtz(uchar2); +float2 __ovld __cnfn convert_float2_rtp(uchar2); +float2 __ovld __cnfn convert_float2_rtn(uchar2); +float2 __ovld __cnfn convert_float2(uchar2); +float2 __ovld __cnfn convert_float2_rte(short2); +float2 __ovld __cnfn convert_float2_rtz(short2); +float2 __ovld __cnfn convert_float2_rtp(short2); +float2 __ovld __cnfn convert_float2_rtn(short2); +float2 __ovld __cnfn convert_float2(short2); +float2 __ovld __cnfn convert_float2_rte(ushort2); +float2 __ovld __cnfn convert_float2_rtz(ushort2); +float2 __ovld __cnfn convert_float2_rtp(ushort2); +float2 __ovld __cnfn convert_float2_rtn(ushort2); +float2 __ovld __cnfn convert_float2(ushort2); +float2 __ovld __cnfn convert_float2_rte(int2); +float2 __ovld __cnfn convert_float2_rtz(int2); +float2 __ovld __cnfn convert_float2_rtp(int2); +float2 __ovld __cnfn convert_float2_rtn(int2); +float2 __ovld __cnfn convert_float2(int2); +float2 __ovld __cnfn convert_float2_rte(uint2); +float2 __ovld __cnfn convert_float2_rtz(uint2); +float2 __ovld __cnfn convert_float2_rtp(uint2); +float2 __ovld __cnfn convert_float2_rtn(uint2); +float2 __ovld __cnfn convert_float2(uint2); +float2 __ovld __cnfn convert_float2_rte(long2); +float2 __ovld __cnfn convert_float2_rtz(long2); +float2 __ovld __cnfn convert_float2_rtp(long2); +float2 __ovld __cnfn convert_float2_rtn(long2); +float2 __ovld __cnfn convert_float2(long2); +float2 __ovld __cnfn convert_float2_rte(ulong2); +float2 __ovld __cnfn convert_float2_rtz(ulong2); +float2 __ovld __cnfn convert_float2_rtp(ulong2); +float2 __ovld __cnfn convert_float2_rtn(ulong2); +float2 __ovld __cnfn convert_float2(ulong2); +float2 __ovld __cnfn convert_float2_rte(float2); +float2 __ovld __cnfn convert_float2_rtz(float2); +float2 __ovld __cnfn convert_float2_rtp(float2); +float2 __ovld __cnfn convert_float2_rtn(float2); +float2 __ovld __cnfn convert_float2(float2); +char3 __ovld __cnfn convert_char3_rte(char3); +char3 __ovld 
__cnfn convert_char3_sat_rte(char3); +char3 __ovld __cnfn convert_char3_rtz(char3); +char3 __ovld __cnfn convert_char3_sat_rtz(char3); +char3 __ovld __cnfn convert_char3_rtp(char3); +char3 __ovld __cnfn convert_char3_sat_rtp(char3); +char3 __ovld __cnfn convert_char3_rtn(char3); +char3 __ovld __cnfn convert_char3_sat_rtn(char3); +char3 __ovld __cnfn convert_char3(char3); +char3 __ovld __cnfn convert_char3_sat(char3); +char3 __ovld __cnfn convert_char3_rte(uchar3); +char3 __ovld __cnfn convert_char3_sat_rte(uchar3); +char3 __ovld __cnfn convert_char3_rtz(uchar3); +char3 __ovld __cnfn convert_char3_sat_rtz(uchar3); +char3 __ovld __cnfn convert_char3_rtp(uchar3); +char3 __ovld __cnfn convert_char3_sat_rtp(uchar3); +char3 __ovld __cnfn convert_char3_rtn(uchar3); +char3 __ovld __cnfn convert_char3_sat_rtn(uchar3); +char3 __ovld __cnfn convert_char3(uchar3); +char3 __ovld __cnfn convert_char3_sat(uchar3); +char3 __ovld __cnfn convert_char3_rte(short3); +char3 __ovld __cnfn convert_char3_sat_rte(short3); +char3 __ovld __cnfn convert_char3_rtz(short3); +char3 __ovld __cnfn convert_char3_sat_rtz(short3); +char3 __ovld __cnfn convert_char3_rtp(short3); +char3 __ovld __cnfn convert_char3_sat_rtp(short3); +char3 __ovld __cnfn convert_char3_rtn(short3); +char3 __ovld __cnfn convert_char3_sat_rtn(short3); +char3 __ovld __cnfn convert_char3(short3); +char3 __ovld __cnfn convert_char3_sat(short3); +char3 __ovld __cnfn convert_char3_rte(ushort3); +char3 __ovld __cnfn convert_char3_sat_rte(ushort3); +char3 __ovld __cnfn convert_char3_rtz(ushort3); +char3 __ovld __cnfn convert_char3_sat_rtz(ushort3); +char3 __ovld __cnfn convert_char3_rtp(ushort3); +char3 __ovld __cnfn convert_char3_sat_rtp(ushort3); +char3 __ovld __cnfn convert_char3_rtn(ushort3); +char3 __ovld __cnfn convert_char3_sat_rtn(ushort3); +char3 __ovld __cnfn convert_char3(ushort3); +char3 __ovld __cnfn convert_char3_sat(ushort3); +char3 __ovld __cnfn convert_char3_rte(int3); +char3 __ovld __cnfn convert_char3_sat_rte(int3); +char3 __ovld __cnfn convert_char3_rtz(int3); +char3 __ovld __cnfn convert_char3_sat_rtz(int3); +char3 __ovld __cnfn convert_char3_rtp(int3); +char3 __ovld __cnfn convert_char3_sat_rtp(int3); +char3 __ovld __cnfn convert_char3_rtn(int3); +char3 __ovld __cnfn convert_char3_sat_rtn(int3); +char3 __ovld __cnfn convert_char3(int3); +char3 __ovld __cnfn convert_char3_sat(int3); +char3 __ovld __cnfn convert_char3_rte(uint3); +char3 __ovld __cnfn convert_char3_sat_rte(uint3); +char3 __ovld __cnfn convert_char3_rtz(uint3); +char3 __ovld __cnfn convert_char3_sat_rtz(uint3); +char3 __ovld __cnfn convert_char3_rtp(uint3); +char3 __ovld __cnfn convert_char3_sat_rtp(uint3); +char3 __ovld __cnfn convert_char3_rtn(uint3); +char3 __ovld __cnfn convert_char3_sat_rtn(uint3); +char3 __ovld __cnfn convert_char3(uint3); +char3 __ovld __cnfn convert_char3_sat(uint3); +char3 __ovld __cnfn convert_char3_rte(long3); +char3 __ovld __cnfn convert_char3_sat_rte(long3); +char3 __ovld __cnfn convert_char3_rtz(long3); +char3 __ovld __cnfn convert_char3_sat_rtz(long3); +char3 __ovld __cnfn convert_char3_rtp(long3); +char3 __ovld __cnfn convert_char3_sat_rtp(long3); +char3 __ovld __cnfn convert_char3_rtn(long3); +char3 __ovld __cnfn convert_char3_sat_rtn(long3); +char3 __ovld __cnfn convert_char3(long3); +char3 __ovld __cnfn convert_char3_sat(long3); +char3 __ovld __cnfn convert_char3_rte(ulong3); +char3 __ovld __cnfn convert_char3_sat_rte(ulong3); +char3 __ovld __cnfn convert_char3_rtz(ulong3); +char3 __ovld __cnfn convert_char3_sat_rtz(ulong3); +char3 
__ovld __cnfn convert_char3_rtp(ulong3); +char3 __ovld __cnfn convert_char3_sat_rtp(ulong3); +char3 __ovld __cnfn convert_char3_rtn(ulong3); +char3 __ovld __cnfn convert_char3_sat_rtn(ulong3); +char3 __ovld __cnfn convert_char3(ulong3); +char3 __ovld __cnfn convert_char3_sat(ulong3); +char3 __ovld __cnfn convert_char3_rte(float3); +char3 __ovld __cnfn convert_char3_sat_rte(float3); +char3 __ovld __cnfn convert_char3_rtz(float3); +char3 __ovld __cnfn convert_char3_sat_rtz(float3); +char3 __ovld __cnfn convert_char3_rtp(float3); +char3 __ovld __cnfn convert_char3_sat_rtp(float3); +char3 __ovld __cnfn convert_char3_rtn(float3); +char3 __ovld __cnfn convert_char3_sat_rtn(float3); +char3 __ovld __cnfn convert_char3(float3); +char3 __ovld __cnfn convert_char3_sat(float3); +uchar3 __ovld __cnfn convert_uchar3_rte(char3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3); +uchar3 __ovld __cnfn convert_uchar3_rtz(char3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3); +uchar3 __ovld __cnfn convert_uchar3_rtp(char3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3); +uchar3 __ovld __cnfn convert_uchar3_rtn(char3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3); +uchar3 __ovld __cnfn convert_uchar3(char3); +uchar3 __ovld __cnfn convert_uchar3_sat(char3); +uchar3 __ovld __cnfn convert_uchar3_rte(uchar3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(uchar3); +uchar3 __ovld __cnfn convert_uchar3_rtz(uchar3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uchar3); +uchar3 __ovld __cnfn convert_uchar3_rtp(uchar3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uchar3); +uchar3 __ovld __cnfn convert_uchar3_rtn(uchar3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uchar3); +uchar3 __ovld __cnfn convert_uchar3(uchar3); +uchar3 __ovld __cnfn convert_uchar3_sat(uchar3); +uchar3 __ovld __cnfn convert_uchar3_rte(short3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(short3); +uchar3 __ovld __cnfn convert_uchar3_rtz(short3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(short3); +uchar3 __ovld __cnfn convert_uchar3_rtp(short3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(short3); +uchar3 __ovld __cnfn convert_uchar3_rtn(short3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(short3); +uchar3 __ovld __cnfn convert_uchar3(short3); +uchar3 __ovld __cnfn convert_uchar3_sat(short3); +uchar3 __ovld __cnfn convert_uchar3_rte(ushort3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(ushort3); +uchar3 __ovld __cnfn convert_uchar3_rtz(ushort3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ushort3); +uchar3 __ovld __cnfn convert_uchar3_rtp(ushort3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ushort3); +uchar3 __ovld __cnfn convert_uchar3_rtn(ushort3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ushort3); +uchar3 __ovld __cnfn convert_uchar3(ushort3); +uchar3 __ovld __cnfn convert_uchar3_sat(ushort3); +uchar3 __ovld __cnfn convert_uchar3_rte(int3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(int3); +uchar3 __ovld __cnfn convert_uchar3_rtz(int3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(int3); +uchar3 __ovld __cnfn convert_uchar3_rtp(int3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(int3); +uchar3 __ovld __cnfn convert_uchar3_rtn(int3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(int3); +uchar3 __ovld __cnfn convert_uchar3(int3); +uchar3 __ovld __cnfn convert_uchar3_sat(int3); +uchar3 __ovld __cnfn convert_uchar3_rte(uint3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(uint3); +uchar3 __ovld __cnfn convert_uchar3_rtz(uint3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(uint3); +uchar3 __ovld __cnfn 
convert_uchar3_rtp(uint3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(uint3); +uchar3 __ovld __cnfn convert_uchar3_rtn(uint3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(uint3); +uchar3 __ovld __cnfn convert_uchar3(uint3); +uchar3 __ovld __cnfn convert_uchar3_sat(uint3); +uchar3 __ovld __cnfn convert_uchar3_rte(long3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(long3); +uchar3 __ovld __cnfn convert_uchar3_rtz(long3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(long3); +uchar3 __ovld __cnfn convert_uchar3_rtp(long3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(long3); +uchar3 __ovld __cnfn convert_uchar3_rtn(long3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(long3); +uchar3 __ovld __cnfn convert_uchar3(long3); +uchar3 __ovld __cnfn convert_uchar3_sat(long3); +uchar3 __ovld __cnfn convert_uchar3_rte(ulong3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(ulong3); +uchar3 __ovld __cnfn convert_uchar3_rtz(ulong3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(ulong3); +uchar3 __ovld __cnfn convert_uchar3_rtp(ulong3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(ulong3); +uchar3 __ovld __cnfn convert_uchar3_rtn(ulong3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(ulong3); +uchar3 __ovld __cnfn convert_uchar3(ulong3); +uchar3 __ovld __cnfn convert_uchar3_sat(ulong3); +uchar3 __ovld __cnfn convert_uchar3_rte(float3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(float3); +uchar3 __ovld __cnfn convert_uchar3_rtz(float3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(float3); +uchar3 __ovld __cnfn convert_uchar3_rtp(float3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(float3); +uchar3 __ovld __cnfn convert_uchar3_rtn(float3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(float3); +uchar3 __ovld __cnfn convert_uchar3(float3); +uchar3 __ovld __cnfn convert_uchar3_sat(float3); +short3 __ovld __cnfn convert_short3_rte(char3); +short3 __ovld __cnfn convert_short3_sat_rte(char3); +short3 __ovld __cnfn convert_short3_rtz(char3); +short3 __ovld __cnfn convert_short3_sat_rtz(char3); +short3 __ovld __cnfn convert_short3_rtp(char3); +short3 __ovld __cnfn convert_short3_sat_rtp(char3); +short3 __ovld __cnfn convert_short3_rtn(char3); +short3 __ovld __cnfn convert_short3_sat_rtn(char3); +short3 __ovld __cnfn convert_short3(char3); +short3 __ovld __cnfn convert_short3_sat(char3); +short3 __ovld __cnfn convert_short3_rte(uchar3); +short3 __ovld __cnfn convert_short3_sat_rte(uchar3); +short3 __ovld __cnfn convert_short3_rtz(uchar3); +short3 __ovld __cnfn convert_short3_sat_rtz(uchar3); +short3 __ovld __cnfn convert_short3_rtp(uchar3); +short3 __ovld __cnfn convert_short3_sat_rtp(uchar3); +short3 __ovld __cnfn convert_short3_rtn(uchar3); +short3 __ovld __cnfn convert_short3_sat_rtn(uchar3); +short3 __ovld __cnfn convert_short3(uchar3); +short3 __ovld __cnfn convert_short3_sat(uchar3); +short3 __ovld __cnfn convert_short3_rte(short3); +short3 __ovld __cnfn convert_short3_sat_rte(short3); +short3 __ovld __cnfn convert_short3_rtz(short3); +short3 __ovld __cnfn convert_short3_sat_rtz(short3); +short3 __ovld __cnfn convert_short3_rtp(short3); +short3 __ovld __cnfn convert_short3_sat_rtp(short3); +short3 __ovld __cnfn convert_short3_rtn(short3); +short3 __ovld __cnfn convert_short3_sat_rtn(short3); +short3 __ovld __cnfn convert_short3(short3); +short3 __ovld __cnfn convert_short3_sat(short3); +short3 __ovld __cnfn convert_short3_rte(ushort3); +short3 __ovld __cnfn convert_short3_sat_rte(ushort3); +short3 __ovld __cnfn convert_short3_rtz(ushort3); +short3 __ovld __cnfn convert_short3_sat_rtz(ushort3); +short3 __ovld 
__cnfn convert_short3_rtp(ushort3); +short3 __ovld __cnfn convert_short3_sat_rtp(ushort3); +short3 __ovld __cnfn convert_short3_rtn(ushort3); +short3 __ovld __cnfn convert_short3_sat_rtn(ushort3); +short3 __ovld __cnfn convert_short3(ushort3); +short3 __ovld __cnfn convert_short3_sat(ushort3); +short3 __ovld __cnfn convert_short3_rte(int3); +short3 __ovld __cnfn convert_short3_sat_rte(int3); +short3 __ovld __cnfn convert_short3_rtz(int3); +short3 __ovld __cnfn convert_short3_sat_rtz(int3); +short3 __ovld __cnfn convert_short3_rtp(int3); +short3 __ovld __cnfn convert_short3_sat_rtp(int3); +short3 __ovld __cnfn convert_short3_rtn(int3); +short3 __ovld __cnfn convert_short3_sat_rtn(int3); +short3 __ovld __cnfn convert_short3(int3); +short3 __ovld __cnfn convert_short3_sat(int3); +short3 __ovld __cnfn convert_short3_rte(uint3); +short3 __ovld __cnfn convert_short3_sat_rte(uint3); +short3 __ovld __cnfn convert_short3_rtz(uint3); +short3 __ovld __cnfn convert_short3_sat_rtz(uint3); +short3 __ovld __cnfn convert_short3_rtp(uint3); +short3 __ovld __cnfn convert_short3_sat_rtp(uint3); +short3 __ovld __cnfn convert_short3_rtn(uint3); +short3 __ovld __cnfn convert_short3_sat_rtn(uint3); +short3 __ovld __cnfn convert_short3(uint3); +short3 __ovld __cnfn convert_short3_sat(uint3); +short3 __ovld __cnfn convert_short3_rte(long3); +short3 __ovld __cnfn convert_short3_sat_rte(long3); +short3 __ovld __cnfn convert_short3_rtz(long3); +short3 __ovld __cnfn convert_short3_sat_rtz(long3); +short3 __ovld __cnfn convert_short3_rtp(long3); +short3 __ovld __cnfn convert_short3_sat_rtp(long3); +short3 __ovld __cnfn convert_short3_rtn(long3); +short3 __ovld __cnfn convert_short3_sat_rtn(long3); +short3 __ovld __cnfn convert_short3(long3); +short3 __ovld __cnfn convert_short3_sat(long3); +short3 __ovld __cnfn convert_short3_rte(ulong3); +short3 __ovld __cnfn convert_short3_sat_rte(ulong3); +short3 __ovld __cnfn convert_short3_rtz(ulong3); +short3 __ovld __cnfn convert_short3_sat_rtz(ulong3); +short3 __ovld __cnfn convert_short3_rtp(ulong3); +short3 __ovld __cnfn convert_short3_sat_rtp(ulong3); +short3 __ovld __cnfn convert_short3_rtn(ulong3); +short3 __ovld __cnfn convert_short3_sat_rtn(ulong3); +short3 __ovld __cnfn convert_short3(ulong3); +short3 __ovld __cnfn convert_short3_sat(ulong3); +short3 __ovld __cnfn convert_short3_rte(float3); +short3 __ovld __cnfn convert_short3_sat_rte(float3); +short3 __ovld __cnfn convert_short3_rtz(float3); +short3 __ovld __cnfn convert_short3_sat_rtz(float3); +short3 __ovld __cnfn convert_short3_rtp(float3); +short3 __ovld __cnfn convert_short3_sat_rtp(float3); +short3 __ovld __cnfn convert_short3_rtn(float3); +short3 __ovld __cnfn convert_short3_sat_rtn(float3); +short3 __ovld __cnfn convert_short3(float3); +short3 __ovld __cnfn convert_short3_sat(float3); +ushort3 __ovld __cnfn convert_ushort3_rte(char3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3); +ushort3 __ovld __cnfn convert_ushort3_rtz(char3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3); +ushort3 __ovld __cnfn convert_ushort3_rtp(char3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3); +ushort3 __ovld __cnfn convert_ushort3_rtn(char3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3); +ushort3 __ovld __cnfn convert_ushort3(char3); +ushort3 __ovld __cnfn convert_ushort3_sat(char3); +ushort3 __ovld __cnfn convert_ushort3_rte(uchar3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(uchar3); +ushort3 __ovld __cnfn convert_ushort3_rtz(uchar3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uchar3); 
+ushort3 __ovld __cnfn convert_ushort3_rtp(uchar3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uchar3); +ushort3 __ovld __cnfn convert_ushort3_rtn(uchar3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uchar3); +ushort3 __ovld __cnfn convert_ushort3(uchar3); +ushort3 __ovld __cnfn convert_ushort3_sat(uchar3); +ushort3 __ovld __cnfn convert_ushort3_rte(short3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(short3); +ushort3 __ovld __cnfn convert_ushort3_rtz(short3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(short3); +ushort3 __ovld __cnfn convert_ushort3_rtp(short3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(short3); +ushort3 __ovld __cnfn convert_ushort3_rtn(short3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(short3); +ushort3 __ovld __cnfn convert_ushort3(short3); +ushort3 __ovld __cnfn convert_ushort3_sat(short3); +ushort3 __ovld __cnfn convert_ushort3_rte(ushort3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(ushort3); +ushort3 __ovld __cnfn convert_ushort3_rtz(ushort3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ushort3); +ushort3 __ovld __cnfn convert_ushort3_rtp(ushort3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ushort3); +ushort3 __ovld __cnfn convert_ushort3_rtn(ushort3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ushort3); +ushort3 __ovld __cnfn convert_ushort3(ushort3); +ushort3 __ovld __cnfn convert_ushort3_sat(ushort3); +ushort3 __ovld __cnfn convert_ushort3_rte(int3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(int3); +ushort3 __ovld __cnfn convert_ushort3_rtz(int3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(int3); +ushort3 __ovld __cnfn convert_ushort3_rtp(int3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(int3); +ushort3 __ovld __cnfn convert_ushort3_rtn(int3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(int3); +ushort3 __ovld __cnfn convert_ushort3(int3); +ushort3 __ovld __cnfn convert_ushort3_sat(int3); +ushort3 __ovld __cnfn convert_ushort3_rte(uint3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(uint3); +ushort3 __ovld __cnfn convert_ushort3_rtz(uint3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(uint3); +ushort3 __ovld __cnfn convert_ushort3_rtp(uint3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(uint3); +ushort3 __ovld __cnfn convert_ushort3_rtn(uint3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(uint3); +ushort3 __ovld __cnfn convert_ushort3(uint3); +ushort3 __ovld __cnfn convert_ushort3_sat(uint3); +ushort3 __ovld __cnfn convert_ushort3_rte(long3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(long3); +ushort3 __ovld __cnfn convert_ushort3_rtz(long3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(long3); +ushort3 __ovld __cnfn convert_ushort3_rtp(long3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(long3); +ushort3 __ovld __cnfn convert_ushort3_rtn(long3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(long3); +ushort3 __ovld __cnfn convert_ushort3(long3); +ushort3 __ovld __cnfn convert_ushort3_sat(long3); +ushort3 __ovld __cnfn convert_ushort3_rte(ulong3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(ulong3); +ushort3 __ovld __cnfn convert_ushort3_rtz(ulong3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(ulong3); +ushort3 __ovld __cnfn convert_ushort3_rtp(ulong3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(ulong3); +ushort3 __ovld __cnfn convert_ushort3_rtn(ulong3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(ulong3); +ushort3 __ovld __cnfn convert_ushort3(ulong3); +ushort3 __ovld __cnfn convert_ushort3_sat(ulong3); +ushort3 __ovld __cnfn convert_ushort3_rte(float3); +ushort3 __ovld __cnfn 
convert_ushort3_sat_rte(float3); +ushort3 __ovld __cnfn convert_ushort3_rtz(float3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(float3); +ushort3 __ovld __cnfn convert_ushort3_rtp(float3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(float3); +ushort3 __ovld __cnfn convert_ushort3_rtn(float3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(float3); +ushort3 __ovld __cnfn convert_ushort3(float3); +ushort3 __ovld __cnfn convert_ushort3_sat(float3); +int3 __ovld __cnfn convert_int3_rte(char3); +int3 __ovld __cnfn convert_int3_sat_rte(char3); +int3 __ovld __cnfn convert_int3_rtz(char3); +int3 __ovld __cnfn convert_int3_sat_rtz(char3); +int3 __ovld __cnfn convert_int3_rtp(char3); +int3 __ovld __cnfn convert_int3_sat_rtp(char3); +int3 __ovld __cnfn convert_int3_rtn(char3); +int3 __ovld __cnfn convert_int3_sat_rtn(char3); +int3 __ovld __cnfn convert_int3(char3); +int3 __ovld __cnfn convert_int3_sat(char3); +int3 __ovld __cnfn convert_int3_rte(uchar3); +int3 __ovld __cnfn convert_int3_sat_rte(uchar3); +int3 __ovld __cnfn convert_int3_rtz(uchar3); +int3 __ovld __cnfn convert_int3_sat_rtz(uchar3); +int3 __ovld __cnfn convert_int3_rtp(uchar3); +int3 __ovld __cnfn convert_int3_sat_rtp(uchar3); +int3 __ovld __cnfn convert_int3_rtn(uchar3); +int3 __ovld __cnfn convert_int3_sat_rtn(uchar3); +int3 __ovld __cnfn convert_int3(uchar3); +int3 __ovld __cnfn convert_int3_sat(uchar3); +int3 __ovld __cnfn convert_int3_rte(short3); +int3 __ovld __cnfn convert_int3_sat_rte(short3); +int3 __ovld __cnfn convert_int3_rtz(short3); +int3 __ovld __cnfn convert_int3_sat_rtz(short3); +int3 __ovld __cnfn convert_int3_rtp(short3); +int3 __ovld __cnfn convert_int3_sat_rtp(short3); +int3 __ovld __cnfn convert_int3_rtn(short3); +int3 __ovld __cnfn convert_int3_sat_rtn(short3); +int3 __ovld __cnfn convert_int3(short3); +int3 __ovld __cnfn convert_int3_sat(short3); +int3 __ovld __cnfn convert_int3_rte(ushort3); +int3 __ovld __cnfn convert_int3_sat_rte(ushort3); +int3 __ovld __cnfn convert_int3_rtz(ushort3); +int3 __ovld __cnfn convert_int3_sat_rtz(ushort3); +int3 __ovld __cnfn convert_int3_rtp(ushort3); +int3 __ovld __cnfn convert_int3_sat_rtp(ushort3); +int3 __ovld __cnfn convert_int3_rtn(ushort3); +int3 __ovld __cnfn convert_int3_sat_rtn(ushort3); +int3 __ovld __cnfn convert_int3(ushort3); +int3 __ovld __cnfn convert_int3_sat(ushort3); +int3 __ovld __cnfn convert_int3_rte(int3); +int3 __ovld __cnfn convert_int3_sat_rte(int3); +int3 __ovld __cnfn convert_int3_rtz(int3); +int3 __ovld __cnfn convert_int3_sat_rtz(int3); +int3 __ovld __cnfn convert_int3_rtp(int3); +int3 __ovld __cnfn convert_int3_sat_rtp(int3); +int3 __ovld __cnfn convert_int3_rtn(int3); +int3 __ovld __cnfn convert_int3_sat_rtn(int3); +int3 __ovld __cnfn convert_int3(int3); +int3 __ovld __cnfn convert_int3_sat(int3); +int3 __ovld __cnfn convert_int3_rte(uint3); +int3 __ovld __cnfn convert_int3_sat_rte(uint3); +int3 __ovld __cnfn convert_int3_rtz(uint3); +int3 __ovld __cnfn convert_int3_sat_rtz(uint3); +int3 __ovld __cnfn convert_int3_rtp(uint3); +int3 __ovld __cnfn convert_int3_sat_rtp(uint3); +int3 __ovld __cnfn convert_int3_rtn(uint3); +int3 __ovld __cnfn convert_int3_sat_rtn(uint3); +int3 __ovld __cnfn convert_int3(uint3); +int3 __ovld __cnfn convert_int3_sat(uint3); +int3 __ovld __cnfn convert_int3_rte(long3); +int3 __ovld __cnfn convert_int3_sat_rte(long3); +int3 __ovld __cnfn convert_int3_rtz(long3); +int3 __ovld __cnfn convert_int3_sat_rtz(long3); +int3 __ovld __cnfn convert_int3_rtp(long3); +int3 __ovld __cnfn convert_int3_sat_rtp(long3); +int3 __ovld 
__cnfn convert_int3_rtn(long3); +int3 __ovld __cnfn convert_int3_sat_rtn(long3); +int3 __ovld __cnfn convert_int3(long3); +int3 __ovld __cnfn convert_int3_sat(long3); +int3 __ovld __cnfn convert_int3_rte(ulong3); +int3 __ovld __cnfn convert_int3_sat_rte(ulong3); +int3 __ovld __cnfn convert_int3_rtz(ulong3); +int3 __ovld __cnfn convert_int3_sat_rtz(ulong3); +int3 __ovld __cnfn convert_int3_rtp(ulong3); +int3 __ovld __cnfn convert_int3_sat_rtp(ulong3); +int3 __ovld __cnfn convert_int3_rtn(ulong3); +int3 __ovld __cnfn convert_int3_sat_rtn(ulong3); +int3 __ovld __cnfn convert_int3(ulong3); +int3 __ovld __cnfn convert_int3_sat(ulong3); +int3 __ovld __cnfn convert_int3_rte(float3); +int3 __ovld __cnfn convert_int3_sat_rte(float3); +int3 __ovld __cnfn convert_int3_rtz(float3); +int3 __ovld __cnfn convert_int3_sat_rtz(float3); +int3 __ovld __cnfn convert_int3_rtp(float3); +int3 __ovld __cnfn convert_int3_sat_rtp(float3); +int3 __ovld __cnfn convert_int3_rtn(float3); +int3 __ovld __cnfn convert_int3_sat_rtn(float3); +int3 __ovld __cnfn convert_int3(float3); +int3 __ovld __cnfn convert_int3_sat(float3); +uint3 __ovld __cnfn convert_uint3_rte(char3); +uint3 __ovld __cnfn convert_uint3_sat_rte(char3); +uint3 __ovld __cnfn convert_uint3_rtz(char3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(char3); +uint3 __ovld __cnfn convert_uint3_rtp(char3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(char3); +uint3 __ovld __cnfn convert_uint3_rtn(char3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(char3); +uint3 __ovld __cnfn convert_uint3(char3); +uint3 __ovld __cnfn convert_uint3_sat(char3); +uint3 __ovld __cnfn convert_uint3_rte(uchar3); +uint3 __ovld __cnfn convert_uint3_sat_rte(uchar3); +uint3 __ovld __cnfn convert_uint3_rtz(uchar3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(uchar3); +uint3 __ovld __cnfn convert_uint3_rtp(uchar3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(uchar3); +uint3 __ovld __cnfn convert_uint3_rtn(uchar3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(uchar3); +uint3 __ovld __cnfn convert_uint3(uchar3); +uint3 __ovld __cnfn convert_uint3_sat(uchar3); +uint3 __ovld __cnfn convert_uint3_rte(short3); +uint3 __ovld __cnfn convert_uint3_sat_rte(short3); +uint3 __ovld __cnfn convert_uint3_rtz(short3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(short3); +uint3 __ovld __cnfn convert_uint3_rtp(short3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(short3); +uint3 __ovld __cnfn convert_uint3_rtn(short3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(short3); +uint3 __ovld __cnfn convert_uint3(short3); +uint3 __ovld __cnfn convert_uint3_sat(short3); +uint3 __ovld __cnfn convert_uint3_rte(ushort3); +uint3 __ovld __cnfn convert_uint3_sat_rte(ushort3); +uint3 __ovld __cnfn convert_uint3_rtz(ushort3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(ushort3); +uint3 __ovld __cnfn convert_uint3_rtp(ushort3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(ushort3); +uint3 __ovld __cnfn convert_uint3_rtn(ushort3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(ushort3); +uint3 __ovld __cnfn convert_uint3(ushort3); +uint3 __ovld __cnfn convert_uint3_sat(ushort3); +uint3 __ovld __cnfn convert_uint3_rte(int3); +uint3 __ovld __cnfn convert_uint3_sat_rte(int3); +uint3 __ovld __cnfn convert_uint3_rtz(int3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(int3); +uint3 __ovld __cnfn convert_uint3_rtp(int3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(int3); +uint3 __ovld __cnfn convert_uint3_rtn(int3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(int3); +uint3 __ovld __cnfn convert_uint3(int3); +uint3 __ovld __cnfn convert_uint3_sat(int3); 
+uint3 __ovld __cnfn convert_uint3_rte(uint3); +uint3 __ovld __cnfn convert_uint3_sat_rte(uint3); +uint3 __ovld __cnfn convert_uint3_rtz(uint3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(uint3); +uint3 __ovld __cnfn convert_uint3_rtp(uint3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(uint3); +uint3 __ovld __cnfn convert_uint3_rtn(uint3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(uint3); +uint3 __ovld __cnfn convert_uint3(uint3); +uint3 __ovld __cnfn convert_uint3_sat(uint3); +uint3 __ovld __cnfn convert_uint3_rte(long3); +uint3 __ovld __cnfn convert_uint3_sat_rte(long3); +uint3 __ovld __cnfn convert_uint3_rtz(long3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(long3); +uint3 __ovld __cnfn convert_uint3_rtp(long3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(long3); +uint3 __ovld __cnfn convert_uint3_rtn(long3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(long3); +uint3 __ovld __cnfn convert_uint3(long3); +uint3 __ovld __cnfn convert_uint3_sat(long3); +uint3 __ovld __cnfn convert_uint3_rte(ulong3); +uint3 __ovld __cnfn convert_uint3_sat_rte(ulong3); +uint3 __ovld __cnfn convert_uint3_rtz(ulong3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(ulong3); +uint3 __ovld __cnfn convert_uint3_rtp(ulong3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(ulong3); +uint3 __ovld __cnfn convert_uint3_rtn(ulong3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(ulong3); +uint3 __ovld __cnfn convert_uint3(ulong3); +uint3 __ovld __cnfn convert_uint3_sat(ulong3); +uint3 __ovld __cnfn convert_uint3_rte(float3); +uint3 __ovld __cnfn convert_uint3_sat_rte(float3); +uint3 __ovld __cnfn convert_uint3_rtz(float3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(float3); +uint3 __ovld __cnfn convert_uint3_rtp(float3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(float3); +uint3 __ovld __cnfn convert_uint3_rtn(float3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(float3); +uint3 __ovld __cnfn convert_uint3(float3); +uint3 __ovld __cnfn convert_uint3_sat(float3); +long3 __ovld __cnfn convert_long3_rte(char3); +long3 __ovld __cnfn convert_long3_sat_rte(char3); +long3 __ovld __cnfn convert_long3_rtz(char3); +long3 __ovld __cnfn convert_long3_sat_rtz(char3); +long3 __ovld __cnfn convert_long3_rtp(char3); +long3 __ovld __cnfn convert_long3_sat_rtp(char3); +long3 __ovld __cnfn convert_long3_rtn(char3); +long3 __ovld __cnfn convert_long3_sat_rtn(char3); +long3 __ovld __cnfn convert_long3(char3); +long3 __ovld __cnfn convert_long3_sat(char3); +long3 __ovld __cnfn convert_long3_rte(uchar3); +long3 __ovld __cnfn convert_long3_sat_rte(uchar3); +long3 __ovld __cnfn convert_long3_rtz(uchar3); +long3 __ovld __cnfn convert_long3_sat_rtz(uchar3); +long3 __ovld __cnfn convert_long3_rtp(uchar3); +long3 __ovld __cnfn convert_long3_sat_rtp(uchar3); +long3 __ovld __cnfn convert_long3_rtn(uchar3); +long3 __ovld __cnfn convert_long3_sat_rtn(uchar3); +long3 __ovld __cnfn convert_long3(uchar3); +long3 __ovld __cnfn convert_long3_sat(uchar3); +long3 __ovld __cnfn convert_long3_rte(short3); +long3 __ovld __cnfn convert_long3_sat_rte(short3); +long3 __ovld __cnfn convert_long3_rtz(short3); +long3 __ovld __cnfn convert_long3_sat_rtz(short3); +long3 __ovld __cnfn convert_long3_rtp(short3); +long3 __ovld __cnfn convert_long3_sat_rtp(short3); +long3 __ovld __cnfn convert_long3_rtn(short3); +long3 __ovld __cnfn convert_long3_sat_rtn(short3); +long3 __ovld __cnfn convert_long3(short3); +long3 __ovld __cnfn convert_long3_sat(short3); +long3 __ovld __cnfn convert_long3_rte(ushort3); +long3 __ovld __cnfn convert_long3_sat_rte(ushort3); +long3 __ovld __cnfn 
convert_long3_rtz(ushort3); +long3 __ovld __cnfn convert_long3_sat_rtz(ushort3); +long3 __ovld __cnfn convert_long3_rtp(ushort3); +long3 __ovld __cnfn convert_long3_sat_rtp(ushort3); +long3 __ovld __cnfn convert_long3_rtn(ushort3); +long3 __ovld __cnfn convert_long3_sat_rtn(ushort3); +long3 __ovld __cnfn convert_long3(ushort3); +long3 __ovld __cnfn convert_long3_sat(ushort3); +long3 __ovld __cnfn convert_long3_rte(int3); +long3 __ovld __cnfn convert_long3_sat_rte(int3); +long3 __ovld __cnfn convert_long3_rtz(int3); +long3 __ovld __cnfn convert_long3_sat_rtz(int3); +long3 __ovld __cnfn convert_long3_rtp(int3); +long3 __ovld __cnfn convert_long3_sat_rtp(int3); +long3 __ovld __cnfn convert_long3_rtn(int3); +long3 __ovld __cnfn convert_long3_sat_rtn(int3); +long3 __ovld __cnfn convert_long3(int3); +long3 __ovld __cnfn convert_long3_sat(int3); +long3 __ovld __cnfn convert_long3_rte(uint3); +long3 __ovld __cnfn convert_long3_sat_rte(uint3); +long3 __ovld __cnfn convert_long3_rtz(uint3); +long3 __ovld __cnfn convert_long3_sat_rtz(uint3); +long3 __ovld __cnfn convert_long3_rtp(uint3); +long3 __ovld __cnfn convert_long3_sat_rtp(uint3); +long3 __ovld __cnfn convert_long3_rtn(uint3); +long3 __ovld __cnfn convert_long3_sat_rtn(uint3); +long3 __ovld __cnfn convert_long3(uint3); +long3 __ovld __cnfn convert_long3_sat(uint3); +long3 __ovld __cnfn convert_long3_rte(long3); +long3 __ovld __cnfn convert_long3_sat_rte(long3); +long3 __ovld __cnfn convert_long3_rtz(long3); +long3 __ovld __cnfn convert_long3_sat_rtz(long3); +long3 __ovld __cnfn convert_long3_rtp(long3); +long3 __ovld __cnfn convert_long3_sat_rtp(long3); +long3 __ovld __cnfn convert_long3_rtn(long3); +long3 __ovld __cnfn convert_long3_sat_rtn(long3); +long3 __ovld __cnfn convert_long3(long3); +long3 __ovld __cnfn convert_long3_sat(long3); +long3 __ovld __cnfn convert_long3_rte(ulong3); +long3 __ovld __cnfn convert_long3_sat_rte(ulong3); +long3 __ovld __cnfn convert_long3_rtz(ulong3); +long3 __ovld __cnfn convert_long3_sat_rtz(ulong3); +long3 __ovld __cnfn convert_long3_rtp(ulong3); +long3 __ovld __cnfn convert_long3_sat_rtp(ulong3); +long3 __ovld __cnfn convert_long3_rtn(ulong3); +long3 __ovld __cnfn convert_long3_sat_rtn(ulong3); +long3 __ovld __cnfn convert_long3(ulong3); +long3 __ovld __cnfn convert_long3_sat(ulong3); +long3 __ovld __cnfn convert_long3_rte(float3); +long3 __ovld __cnfn convert_long3_sat_rte(float3); +long3 __ovld __cnfn convert_long3_rtz(float3); +long3 __ovld __cnfn convert_long3_sat_rtz(float3); +long3 __ovld __cnfn convert_long3_rtp(float3); +long3 __ovld __cnfn convert_long3_sat_rtp(float3); +long3 __ovld __cnfn convert_long3_rtn(float3); +long3 __ovld __cnfn convert_long3_sat_rtn(float3); +long3 __ovld __cnfn convert_long3(float3); +long3 __ovld __cnfn convert_long3_sat(float3); +ulong3 __ovld __cnfn convert_ulong3_rte(char3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3); +ulong3 __ovld __cnfn convert_ulong3_rtz(char3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3); +ulong3 __ovld __cnfn convert_ulong3_rtp(char3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3); +ulong3 __ovld __cnfn convert_ulong3_rtn(char3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3); +ulong3 __ovld __cnfn convert_ulong3(char3); +ulong3 __ovld __cnfn convert_ulong3_sat(char3); +ulong3 __ovld __cnfn convert_ulong3_rte(uchar3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(uchar3); +ulong3 __ovld __cnfn convert_ulong3_rtz(uchar3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uchar3); +ulong3 __ovld __cnfn 
convert_ulong3_rtp(uchar3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uchar3); +ulong3 __ovld __cnfn convert_ulong3_rtn(uchar3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uchar3); +ulong3 __ovld __cnfn convert_ulong3(uchar3); +ulong3 __ovld __cnfn convert_ulong3_sat(uchar3); +ulong3 __ovld __cnfn convert_ulong3_rte(short3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(short3); +ulong3 __ovld __cnfn convert_ulong3_rtz(short3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(short3); +ulong3 __ovld __cnfn convert_ulong3_rtp(short3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(short3); +ulong3 __ovld __cnfn convert_ulong3_rtn(short3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(short3); +ulong3 __ovld __cnfn convert_ulong3(short3); +ulong3 __ovld __cnfn convert_ulong3_sat(short3); +ulong3 __ovld __cnfn convert_ulong3_rte(ushort3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(ushort3); +ulong3 __ovld __cnfn convert_ulong3_rtz(ushort3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ushort3); +ulong3 __ovld __cnfn convert_ulong3_rtp(ushort3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ushort3); +ulong3 __ovld __cnfn convert_ulong3_rtn(ushort3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ushort3); +ulong3 __ovld __cnfn convert_ulong3(ushort3); +ulong3 __ovld __cnfn convert_ulong3_sat(ushort3); +ulong3 __ovld __cnfn convert_ulong3_rte(int3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(int3); +ulong3 __ovld __cnfn convert_ulong3_rtz(int3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(int3); +ulong3 __ovld __cnfn convert_ulong3_rtp(int3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(int3); +ulong3 __ovld __cnfn convert_ulong3_rtn(int3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(int3); +ulong3 __ovld __cnfn convert_ulong3(int3); +ulong3 __ovld __cnfn convert_ulong3_sat(int3); +ulong3 __ovld __cnfn convert_ulong3_rte(uint3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(uint3); +ulong3 __ovld __cnfn convert_ulong3_rtz(uint3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(uint3); +ulong3 __ovld __cnfn convert_ulong3_rtp(uint3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(uint3); +ulong3 __ovld __cnfn convert_ulong3_rtn(uint3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(uint3); +ulong3 __ovld __cnfn convert_ulong3(uint3); +ulong3 __ovld __cnfn convert_ulong3_sat(uint3); +ulong3 __ovld __cnfn convert_ulong3_rte(long3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(long3); +ulong3 __ovld __cnfn convert_ulong3_rtz(long3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(long3); +ulong3 __ovld __cnfn convert_ulong3_rtp(long3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(long3); +ulong3 __ovld __cnfn convert_ulong3_rtn(long3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(long3); +ulong3 __ovld __cnfn convert_ulong3(long3); +ulong3 __ovld __cnfn convert_ulong3_sat(long3); +ulong3 __ovld __cnfn convert_ulong3_rte(ulong3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(ulong3); +ulong3 __ovld __cnfn convert_ulong3_rtz(ulong3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(ulong3); +ulong3 __ovld __cnfn convert_ulong3_rtp(ulong3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(ulong3); +ulong3 __ovld __cnfn convert_ulong3_rtn(ulong3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(ulong3); +ulong3 __ovld __cnfn convert_ulong3(ulong3); +ulong3 __ovld __cnfn convert_ulong3_sat(ulong3); +ulong3 __ovld __cnfn convert_ulong3_rte(float3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(float3); +ulong3 __ovld __cnfn convert_ulong3_rtz(float3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(float3); +ulong3 __ovld __cnfn 
convert_ulong3_rtp(float3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(float3); +ulong3 __ovld __cnfn convert_ulong3_rtn(float3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(float3); +ulong3 __ovld __cnfn convert_ulong3(float3); +ulong3 __ovld __cnfn convert_ulong3_sat(float3); +float3 __ovld __cnfn convert_float3_rte(char3); +float3 __ovld __cnfn convert_float3_rtz(char3); +float3 __ovld __cnfn convert_float3_rtp(char3); +float3 __ovld __cnfn convert_float3_rtn(char3); +float3 __ovld __cnfn convert_float3(char3); +float3 __ovld __cnfn convert_float3_rte(uchar3); +float3 __ovld __cnfn convert_float3_rtz(uchar3); +float3 __ovld __cnfn convert_float3_rtp(uchar3); +float3 __ovld __cnfn convert_float3_rtn(uchar3); +float3 __ovld __cnfn convert_float3(uchar3); +float3 __ovld __cnfn convert_float3_rte(short3); +float3 __ovld __cnfn convert_float3_rtz(short3); +float3 __ovld __cnfn convert_float3_rtp(short3); +float3 __ovld __cnfn convert_float3_rtn(short3); +float3 __ovld __cnfn convert_float3(short3); +float3 __ovld __cnfn convert_float3_rte(ushort3); +float3 __ovld __cnfn convert_float3_rtz(ushort3); +float3 __ovld __cnfn convert_float3_rtp(ushort3); +float3 __ovld __cnfn convert_float3_rtn(ushort3); +float3 __ovld __cnfn convert_float3(ushort3); +float3 __ovld __cnfn convert_float3_rte(int3); +float3 __ovld __cnfn convert_float3_rtz(int3); +float3 __ovld __cnfn convert_float3_rtp(int3); +float3 __ovld __cnfn convert_float3_rtn(int3); +float3 __ovld __cnfn convert_float3(int3); +float3 __ovld __cnfn convert_float3_rte(uint3); +float3 __ovld __cnfn convert_float3_rtz(uint3); +float3 __ovld __cnfn convert_float3_rtp(uint3); +float3 __ovld __cnfn convert_float3_rtn(uint3); +float3 __ovld __cnfn convert_float3(uint3); +float3 __ovld __cnfn convert_float3_rte(long3); +float3 __ovld __cnfn convert_float3_rtz(long3); +float3 __ovld __cnfn convert_float3_rtp(long3); +float3 __ovld __cnfn convert_float3_rtn(long3); +float3 __ovld __cnfn convert_float3(long3); +float3 __ovld __cnfn convert_float3_rte(ulong3); +float3 __ovld __cnfn convert_float3_rtz(ulong3); +float3 __ovld __cnfn convert_float3_rtp(ulong3); +float3 __ovld __cnfn convert_float3_rtn(ulong3); +float3 __ovld __cnfn convert_float3(ulong3); +float3 __ovld __cnfn convert_float3_rte(float3); +float3 __ovld __cnfn convert_float3_rtz(float3); +float3 __ovld __cnfn convert_float3_rtp(float3); +float3 __ovld __cnfn convert_float3_rtn(float3); +float3 __ovld __cnfn convert_float3(float3); +char4 __ovld __cnfn convert_char4_rte(char4); +char4 __ovld __cnfn convert_char4_sat_rte(char4); +char4 __ovld __cnfn convert_char4_rtz(char4); +char4 __ovld __cnfn convert_char4_sat_rtz(char4); +char4 __ovld __cnfn convert_char4_rtp(char4); +char4 __ovld __cnfn convert_char4_sat_rtp(char4); +char4 __ovld __cnfn convert_char4_rtn(char4); +char4 __ovld __cnfn convert_char4_sat_rtn(char4); +char4 __ovld __cnfn convert_char4(char4); +char4 __ovld __cnfn convert_char4_sat(char4); +char4 __ovld __cnfn convert_char4_rte(uchar4); +char4 __ovld __cnfn convert_char4_sat_rte(uchar4); +char4 __ovld __cnfn convert_char4_rtz(uchar4); +char4 __ovld __cnfn convert_char4_sat_rtz(uchar4); +char4 __ovld __cnfn convert_char4_rtp(uchar4); +char4 __ovld __cnfn convert_char4_sat_rtp(uchar4); +char4 __ovld __cnfn convert_char4_rtn(uchar4); +char4 __ovld __cnfn convert_char4_sat_rtn(uchar4); +char4 __ovld __cnfn convert_char4(uchar4); +char4 __ovld __cnfn convert_char4_sat(uchar4); +char4 __ovld __cnfn convert_char4_rte(short4); +char4 __ovld __cnfn convert_char4_sat_rte(short4); 
+char4 __ovld __cnfn convert_char4_rtz(short4); +char4 __ovld __cnfn convert_char4_sat_rtz(short4); +char4 __ovld __cnfn convert_char4_rtp(short4); +char4 __ovld __cnfn convert_char4_sat_rtp(short4); +char4 __ovld __cnfn convert_char4_rtn(short4); +char4 __ovld __cnfn convert_char4_sat_rtn(short4); +char4 __ovld __cnfn convert_char4(short4); +char4 __ovld __cnfn convert_char4_sat(short4); +char4 __ovld __cnfn convert_char4_rte(ushort4); +char4 __ovld __cnfn convert_char4_sat_rte(ushort4); +char4 __ovld __cnfn convert_char4_rtz(ushort4); +char4 __ovld __cnfn convert_char4_sat_rtz(ushort4); +char4 __ovld __cnfn convert_char4_rtp(ushort4); +char4 __ovld __cnfn convert_char4_sat_rtp(ushort4); +char4 __ovld __cnfn convert_char4_rtn(ushort4); +char4 __ovld __cnfn convert_char4_sat_rtn(ushort4); +char4 __ovld __cnfn convert_char4(ushort4); +char4 __ovld __cnfn convert_char4_sat(ushort4); +char4 __ovld __cnfn convert_char4_rte(int4); +char4 __ovld __cnfn convert_char4_sat_rte(int4); +char4 __ovld __cnfn convert_char4_rtz(int4); +char4 __ovld __cnfn convert_char4_sat_rtz(int4); +char4 __ovld __cnfn convert_char4_rtp(int4); +char4 __ovld __cnfn convert_char4_sat_rtp(int4); +char4 __ovld __cnfn convert_char4_rtn(int4); +char4 __ovld __cnfn convert_char4_sat_rtn(int4); +char4 __ovld __cnfn convert_char4(int4); +char4 __ovld __cnfn convert_char4_sat(int4); +char4 __ovld __cnfn convert_char4_rte(uint4); +char4 __ovld __cnfn convert_char4_sat_rte(uint4); +char4 __ovld __cnfn convert_char4_rtz(uint4); +char4 __ovld __cnfn convert_char4_sat_rtz(uint4); +char4 __ovld __cnfn convert_char4_rtp(uint4); +char4 __ovld __cnfn convert_char4_sat_rtp(uint4); +char4 __ovld __cnfn convert_char4_rtn(uint4); +char4 __ovld __cnfn convert_char4_sat_rtn(uint4); +char4 __ovld __cnfn convert_char4(uint4); +char4 __ovld __cnfn convert_char4_sat(uint4); +char4 __ovld __cnfn convert_char4_rte(long4); +char4 __ovld __cnfn convert_char4_sat_rte(long4); +char4 __ovld __cnfn convert_char4_rtz(long4); +char4 __ovld __cnfn convert_char4_sat_rtz(long4); +char4 __ovld __cnfn convert_char4_rtp(long4); +char4 __ovld __cnfn convert_char4_sat_rtp(long4); +char4 __ovld __cnfn convert_char4_rtn(long4); +char4 __ovld __cnfn convert_char4_sat_rtn(long4); +char4 __ovld __cnfn convert_char4(long4); +char4 __ovld __cnfn convert_char4_sat(long4); +char4 __ovld __cnfn convert_char4_rte(ulong4); +char4 __ovld __cnfn convert_char4_sat_rte(ulong4); +char4 __ovld __cnfn convert_char4_rtz(ulong4); +char4 __ovld __cnfn convert_char4_sat_rtz(ulong4); +char4 __ovld __cnfn convert_char4_rtp(ulong4); +char4 __ovld __cnfn convert_char4_sat_rtp(ulong4); +char4 __ovld __cnfn convert_char4_rtn(ulong4); +char4 __ovld __cnfn convert_char4_sat_rtn(ulong4); +char4 __ovld __cnfn convert_char4(ulong4); +char4 __ovld __cnfn convert_char4_sat(ulong4); +char4 __ovld __cnfn convert_char4_rte(float4); +char4 __ovld __cnfn convert_char4_sat_rte(float4); +char4 __ovld __cnfn convert_char4_rtz(float4); +char4 __ovld __cnfn convert_char4_sat_rtz(float4); +char4 __ovld __cnfn convert_char4_rtp(float4); +char4 __ovld __cnfn convert_char4_sat_rtp(float4); +char4 __ovld __cnfn convert_char4_rtn(float4); +char4 __ovld __cnfn convert_char4_sat_rtn(float4); +char4 __ovld __cnfn convert_char4(float4); +char4 __ovld __cnfn convert_char4_sat(float4); +uchar4 __ovld __cnfn convert_uchar4_rte(char4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4); +uchar4 __ovld __cnfn convert_uchar4_rtz(char4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4); +uchar4 __ovld __cnfn 
convert_uchar4_rtp(char4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4); +uchar4 __ovld __cnfn convert_uchar4_rtn(char4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4); +uchar4 __ovld __cnfn convert_uchar4(char4); +uchar4 __ovld __cnfn convert_uchar4_sat(char4); +uchar4 __ovld __cnfn convert_uchar4_rte(uchar4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(uchar4); +uchar4 __ovld __cnfn convert_uchar4_rtz(uchar4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uchar4); +uchar4 __ovld __cnfn convert_uchar4_rtp(uchar4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uchar4); +uchar4 __ovld __cnfn convert_uchar4_rtn(uchar4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uchar4); +uchar4 __ovld __cnfn convert_uchar4(uchar4); +uchar4 __ovld __cnfn convert_uchar4_sat(uchar4); +uchar4 __ovld __cnfn convert_uchar4_rte(short4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(short4); +uchar4 __ovld __cnfn convert_uchar4_rtz(short4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(short4); +uchar4 __ovld __cnfn convert_uchar4_rtp(short4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(short4); +uchar4 __ovld __cnfn convert_uchar4_rtn(short4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(short4); +uchar4 __ovld __cnfn convert_uchar4(short4); +uchar4 __ovld __cnfn convert_uchar4_sat(short4); +uchar4 __ovld __cnfn convert_uchar4_rte(ushort4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(ushort4); +uchar4 __ovld __cnfn convert_uchar4_rtz(ushort4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ushort4); +uchar4 __ovld __cnfn convert_uchar4_rtp(ushort4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ushort4); +uchar4 __ovld __cnfn convert_uchar4_rtn(ushort4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ushort4); +uchar4 __ovld __cnfn convert_uchar4(ushort4); +uchar4 __ovld __cnfn convert_uchar4_sat(ushort4); +uchar4 __ovld __cnfn convert_uchar4_rte(int4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(int4); +uchar4 __ovld __cnfn convert_uchar4_rtz(int4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(int4); +uchar4 __ovld __cnfn convert_uchar4_rtp(int4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(int4); +uchar4 __ovld __cnfn convert_uchar4_rtn(int4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(int4); +uchar4 __ovld __cnfn convert_uchar4(int4); +uchar4 __ovld __cnfn convert_uchar4_sat(int4); +uchar4 __ovld __cnfn convert_uchar4_rte(uint4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(uint4); +uchar4 __ovld __cnfn convert_uchar4_rtz(uint4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(uint4); +uchar4 __ovld __cnfn convert_uchar4_rtp(uint4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(uint4); +uchar4 __ovld __cnfn convert_uchar4_rtn(uint4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(uint4); +uchar4 __ovld __cnfn convert_uchar4(uint4); +uchar4 __ovld __cnfn convert_uchar4_sat(uint4); +uchar4 __ovld __cnfn convert_uchar4_rte(long4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(long4); +uchar4 __ovld __cnfn convert_uchar4_rtz(long4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(long4); +uchar4 __ovld __cnfn convert_uchar4_rtp(long4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(long4); +uchar4 __ovld __cnfn convert_uchar4_rtn(long4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(long4); +uchar4 __ovld __cnfn convert_uchar4(long4); +uchar4 __ovld __cnfn convert_uchar4_sat(long4); +uchar4 __ovld __cnfn convert_uchar4_rte(ulong4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(ulong4); +uchar4 __ovld __cnfn convert_uchar4_rtz(ulong4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(ulong4); +uchar4 __ovld __cnfn 
convert_uchar4_rtp(ulong4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(ulong4); +uchar4 __ovld __cnfn convert_uchar4_rtn(ulong4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(ulong4); +uchar4 __ovld __cnfn convert_uchar4(ulong4); +uchar4 __ovld __cnfn convert_uchar4_sat(ulong4); +uchar4 __ovld __cnfn convert_uchar4_rte(float4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(float4); +uchar4 __ovld __cnfn convert_uchar4_rtz(float4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(float4); +uchar4 __ovld __cnfn convert_uchar4_rtp(float4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(float4); +uchar4 __ovld __cnfn convert_uchar4_rtn(float4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(float4); +uchar4 __ovld __cnfn convert_uchar4(float4); +uchar4 __ovld __cnfn convert_uchar4_sat(float4); +short4 __ovld __cnfn convert_short4_rte(char4); +short4 __ovld __cnfn convert_short4_sat_rte(char4); +short4 __ovld __cnfn convert_short4_rtz(char4); +short4 __ovld __cnfn convert_short4_sat_rtz(char4); +short4 __ovld __cnfn convert_short4_rtp(char4); +short4 __ovld __cnfn convert_short4_sat_rtp(char4); +short4 __ovld __cnfn convert_short4_rtn(char4); +short4 __ovld __cnfn convert_short4_sat_rtn(char4); +short4 __ovld __cnfn convert_short4(char4); +short4 __ovld __cnfn convert_short4_sat(char4); +short4 __ovld __cnfn convert_short4_rte(uchar4); +short4 __ovld __cnfn convert_short4_sat_rte(uchar4); +short4 __ovld __cnfn convert_short4_rtz(uchar4); +short4 __ovld __cnfn convert_short4_sat_rtz(uchar4); +short4 __ovld __cnfn convert_short4_rtp(uchar4); +short4 __ovld __cnfn convert_short4_sat_rtp(uchar4); +short4 __ovld __cnfn convert_short4_rtn(uchar4); +short4 __ovld __cnfn convert_short4_sat_rtn(uchar4); +short4 __ovld __cnfn convert_short4(uchar4); +short4 __ovld __cnfn convert_short4_sat(uchar4); +short4 __ovld __cnfn convert_short4_rte(short4); +short4 __ovld __cnfn convert_short4_sat_rte(short4); +short4 __ovld __cnfn convert_short4_rtz(short4); +short4 __ovld __cnfn convert_short4_sat_rtz(short4); +short4 __ovld __cnfn convert_short4_rtp(short4); +short4 __ovld __cnfn convert_short4_sat_rtp(short4); +short4 __ovld __cnfn convert_short4_rtn(short4); +short4 __ovld __cnfn convert_short4_sat_rtn(short4); +short4 __ovld __cnfn convert_short4(short4); +short4 __ovld __cnfn convert_short4_sat(short4); +short4 __ovld __cnfn convert_short4_rte(ushort4); +short4 __ovld __cnfn convert_short4_sat_rte(ushort4); +short4 __ovld __cnfn convert_short4_rtz(ushort4); +short4 __ovld __cnfn convert_short4_sat_rtz(ushort4); +short4 __ovld __cnfn convert_short4_rtp(ushort4); +short4 __ovld __cnfn convert_short4_sat_rtp(ushort4); +short4 __ovld __cnfn convert_short4_rtn(ushort4); +short4 __ovld __cnfn convert_short4_sat_rtn(ushort4); +short4 __ovld __cnfn convert_short4(ushort4); +short4 __ovld __cnfn convert_short4_sat(ushort4); +short4 __ovld __cnfn convert_short4_rte(int4); +short4 __ovld __cnfn convert_short4_sat_rte(int4); +short4 __ovld __cnfn convert_short4_rtz(int4); +short4 __ovld __cnfn convert_short4_sat_rtz(int4); +short4 __ovld __cnfn convert_short4_rtp(int4); +short4 __ovld __cnfn convert_short4_sat_rtp(int4); +short4 __ovld __cnfn convert_short4_rtn(int4); +short4 __ovld __cnfn convert_short4_sat_rtn(int4); +short4 __ovld __cnfn convert_short4(int4); +short4 __ovld __cnfn convert_short4_sat(int4); +short4 __ovld __cnfn convert_short4_rte(uint4); +short4 __ovld __cnfn convert_short4_sat_rte(uint4); +short4 __ovld __cnfn convert_short4_rtz(uint4); +short4 __ovld __cnfn convert_short4_sat_rtz(uint4); +short4 __ovld 
__cnfn convert_short4_rtp(uint4); +short4 __ovld __cnfn convert_short4_sat_rtp(uint4); +short4 __ovld __cnfn convert_short4_rtn(uint4); +short4 __ovld __cnfn convert_short4_sat_rtn(uint4); +short4 __ovld __cnfn convert_short4(uint4); +short4 __ovld __cnfn convert_short4_sat(uint4); +short4 __ovld __cnfn convert_short4_rte(long4); +short4 __ovld __cnfn convert_short4_sat_rte(long4); +short4 __ovld __cnfn convert_short4_rtz(long4); +short4 __ovld __cnfn convert_short4_sat_rtz(long4); +short4 __ovld __cnfn convert_short4_rtp(long4); +short4 __ovld __cnfn convert_short4_sat_rtp(long4); +short4 __ovld __cnfn convert_short4_rtn(long4); +short4 __ovld __cnfn convert_short4_sat_rtn(long4); +short4 __ovld __cnfn convert_short4(long4); +short4 __ovld __cnfn convert_short4_sat(long4); +short4 __ovld __cnfn convert_short4_rte(ulong4); +short4 __ovld __cnfn convert_short4_sat_rte(ulong4); +short4 __ovld __cnfn convert_short4_rtz(ulong4); +short4 __ovld __cnfn convert_short4_sat_rtz(ulong4); +short4 __ovld __cnfn convert_short4_rtp(ulong4); +short4 __ovld __cnfn convert_short4_sat_rtp(ulong4); +short4 __ovld __cnfn convert_short4_rtn(ulong4); +short4 __ovld __cnfn convert_short4_sat_rtn(ulong4); +short4 __ovld __cnfn convert_short4(ulong4); +short4 __ovld __cnfn convert_short4_sat(ulong4); +short4 __ovld __cnfn convert_short4_rte(float4); +short4 __ovld __cnfn convert_short4_sat_rte(float4); +short4 __ovld __cnfn convert_short4_rtz(float4); +short4 __ovld __cnfn convert_short4_sat_rtz(float4); +short4 __ovld __cnfn convert_short4_rtp(float4); +short4 __ovld __cnfn convert_short4_sat_rtp(float4); +short4 __ovld __cnfn convert_short4_rtn(float4); +short4 __ovld __cnfn convert_short4_sat_rtn(float4); +short4 __ovld __cnfn convert_short4(float4); +short4 __ovld __cnfn convert_short4_sat(float4); +ushort4 __ovld __cnfn convert_ushort4_rte(char4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4); +ushort4 __ovld __cnfn convert_ushort4_rtz(char4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4); +ushort4 __ovld __cnfn convert_ushort4_rtp(char4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4); +ushort4 __ovld __cnfn convert_ushort4_rtn(char4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4); +ushort4 __ovld __cnfn convert_ushort4(char4); +ushort4 __ovld __cnfn convert_ushort4_sat(char4); +ushort4 __ovld __cnfn convert_ushort4_rte(uchar4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(uchar4); +ushort4 __ovld __cnfn convert_ushort4_rtz(uchar4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uchar4); +ushort4 __ovld __cnfn convert_ushort4_rtp(uchar4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uchar4); +ushort4 __ovld __cnfn convert_ushort4_rtn(uchar4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uchar4); +ushort4 __ovld __cnfn convert_ushort4(uchar4); +ushort4 __ovld __cnfn convert_ushort4_sat(uchar4); +ushort4 __ovld __cnfn convert_ushort4_rte(short4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(short4); +ushort4 __ovld __cnfn convert_ushort4_rtz(short4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(short4); +ushort4 __ovld __cnfn convert_ushort4_rtp(short4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(short4); +ushort4 __ovld __cnfn convert_ushort4_rtn(short4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(short4); +ushort4 __ovld __cnfn convert_ushort4(short4); +ushort4 __ovld __cnfn convert_ushort4_sat(short4); +ushort4 __ovld __cnfn convert_ushort4_rte(ushort4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(ushort4); +ushort4 __ovld __cnfn 
convert_ushort4_rtz(ushort4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ushort4); +ushort4 __ovld __cnfn convert_ushort4_rtp(ushort4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ushort4); +ushort4 __ovld __cnfn convert_ushort4_rtn(ushort4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ushort4); +ushort4 __ovld __cnfn convert_ushort4(ushort4); +ushort4 __ovld __cnfn convert_ushort4_sat(ushort4); +ushort4 __ovld __cnfn convert_ushort4_rte(int4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(int4); +ushort4 __ovld __cnfn convert_ushort4_rtz(int4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(int4); +ushort4 __ovld __cnfn convert_ushort4_rtp(int4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(int4); +ushort4 __ovld __cnfn convert_ushort4_rtn(int4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(int4); +ushort4 __ovld __cnfn convert_ushort4(int4); +ushort4 __ovld __cnfn convert_ushort4_sat(int4); +ushort4 __ovld __cnfn convert_ushort4_rte(uint4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(uint4); +ushort4 __ovld __cnfn convert_ushort4_rtz(uint4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(uint4); +ushort4 __ovld __cnfn convert_ushort4_rtp(uint4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(uint4); +ushort4 __ovld __cnfn convert_ushort4_rtn(uint4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(uint4); +ushort4 __ovld __cnfn convert_ushort4(uint4); +ushort4 __ovld __cnfn convert_ushort4_sat(uint4); +ushort4 __ovld __cnfn convert_ushort4_rte(long4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(long4); +ushort4 __ovld __cnfn convert_ushort4_rtz(long4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(long4); +ushort4 __ovld __cnfn convert_ushort4_rtp(long4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(long4); +ushort4 __ovld __cnfn convert_ushort4_rtn(long4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(long4); +ushort4 __ovld __cnfn convert_ushort4(long4); +ushort4 __ovld __cnfn convert_ushort4_sat(long4); +ushort4 __ovld __cnfn convert_ushort4_rte(ulong4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(ulong4); +ushort4 __ovld __cnfn convert_ushort4_rtz(ulong4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(ulong4); +ushort4 __ovld __cnfn convert_ushort4_rtp(ulong4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(ulong4); +ushort4 __ovld __cnfn convert_ushort4_rtn(ulong4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(ulong4); +ushort4 __ovld __cnfn convert_ushort4(ulong4); +ushort4 __ovld __cnfn convert_ushort4_sat(ulong4); +ushort4 __ovld __cnfn convert_ushort4_rte(float4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(float4); +ushort4 __ovld __cnfn convert_ushort4_rtz(float4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(float4); +ushort4 __ovld __cnfn convert_ushort4_rtp(float4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(float4); +ushort4 __ovld __cnfn convert_ushort4_rtn(float4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(float4); +ushort4 __ovld __cnfn convert_ushort4(float4); +ushort4 __ovld __cnfn convert_ushort4_sat(float4); +int4 __ovld __cnfn convert_int4_rte(char4); +int4 __ovld __cnfn convert_int4_sat_rte(char4); +int4 __ovld __cnfn convert_int4_rtz(char4); +int4 __ovld __cnfn convert_int4_sat_rtz(char4); +int4 __ovld __cnfn convert_int4_rtp(char4); +int4 __ovld __cnfn convert_int4_sat_rtp(char4); +int4 __ovld __cnfn convert_int4_rtn(char4); +int4 __ovld __cnfn convert_int4_sat_rtn(char4); +int4 __ovld __cnfn convert_int4(char4); +int4 __ovld __cnfn convert_int4_sat(char4); +int4 __ovld __cnfn convert_int4_rte(uchar4); +int4 __ovld 
__cnfn convert_int4_sat_rte(uchar4); +int4 __ovld __cnfn convert_int4_rtz(uchar4); +int4 __ovld __cnfn convert_int4_sat_rtz(uchar4); +int4 __ovld __cnfn convert_int4_rtp(uchar4); +int4 __ovld __cnfn convert_int4_sat_rtp(uchar4); +int4 __ovld __cnfn convert_int4_rtn(uchar4); +int4 __ovld __cnfn convert_int4_sat_rtn(uchar4); +int4 __ovld __cnfn convert_int4(uchar4); +int4 __ovld __cnfn convert_int4_sat(uchar4); +int4 __ovld __cnfn convert_int4_rte(short4); +int4 __ovld __cnfn convert_int4_sat_rte(short4); +int4 __ovld __cnfn convert_int4_rtz(short4); +int4 __ovld __cnfn convert_int4_sat_rtz(short4); +int4 __ovld __cnfn convert_int4_rtp(short4); +int4 __ovld __cnfn convert_int4_sat_rtp(short4); +int4 __ovld __cnfn convert_int4_rtn(short4); +int4 __ovld __cnfn convert_int4_sat_rtn(short4); +int4 __ovld __cnfn convert_int4(short4); +int4 __ovld __cnfn convert_int4_sat(short4); +int4 __ovld __cnfn convert_int4_rte(ushort4); +int4 __ovld __cnfn convert_int4_sat_rte(ushort4); +int4 __ovld __cnfn convert_int4_rtz(ushort4); +int4 __ovld __cnfn convert_int4_sat_rtz(ushort4); +int4 __ovld __cnfn convert_int4_rtp(ushort4); +int4 __ovld __cnfn convert_int4_sat_rtp(ushort4); +int4 __ovld __cnfn convert_int4_rtn(ushort4); +int4 __ovld __cnfn convert_int4_sat_rtn(ushort4); +int4 __ovld __cnfn convert_int4(ushort4); +int4 __ovld __cnfn convert_int4_sat(ushort4); +int4 __ovld __cnfn convert_int4_rte(int4); +int4 __ovld __cnfn convert_int4_sat_rte(int4); +int4 __ovld __cnfn convert_int4_rtz(int4); +int4 __ovld __cnfn convert_int4_sat_rtz(int4); +int4 __ovld __cnfn convert_int4_rtp(int4); +int4 __ovld __cnfn convert_int4_sat_rtp(int4); +int4 __ovld __cnfn convert_int4_rtn(int4); +int4 __ovld __cnfn convert_int4_sat_rtn(int4); +int4 __ovld __cnfn convert_int4(int4); +int4 __ovld __cnfn convert_int4_sat(int4); +int4 __ovld __cnfn convert_int4_rte(uint4); +int4 __ovld __cnfn convert_int4_sat_rte(uint4); +int4 __ovld __cnfn convert_int4_rtz(uint4); +int4 __ovld __cnfn convert_int4_sat_rtz(uint4); +int4 __ovld __cnfn convert_int4_rtp(uint4); +int4 __ovld __cnfn convert_int4_sat_rtp(uint4); +int4 __ovld __cnfn convert_int4_rtn(uint4); +int4 __ovld __cnfn convert_int4_sat_rtn(uint4); +int4 __ovld __cnfn convert_int4(uint4); +int4 __ovld __cnfn convert_int4_sat(uint4); +int4 __ovld __cnfn convert_int4_rte(long4); +int4 __ovld __cnfn convert_int4_sat_rte(long4); +int4 __ovld __cnfn convert_int4_rtz(long4); +int4 __ovld __cnfn convert_int4_sat_rtz(long4); +int4 __ovld __cnfn convert_int4_rtp(long4); +int4 __ovld __cnfn convert_int4_sat_rtp(long4); +int4 __ovld __cnfn convert_int4_rtn(long4); +int4 __ovld __cnfn convert_int4_sat_rtn(long4); +int4 __ovld __cnfn convert_int4(long4); +int4 __ovld __cnfn convert_int4_sat(long4); +int4 __ovld __cnfn convert_int4_rte(ulong4); +int4 __ovld __cnfn convert_int4_sat_rte(ulong4); +int4 __ovld __cnfn convert_int4_rtz(ulong4); +int4 __ovld __cnfn convert_int4_sat_rtz(ulong4); +int4 __ovld __cnfn convert_int4_rtp(ulong4); +int4 __ovld __cnfn convert_int4_sat_rtp(ulong4); +int4 __ovld __cnfn convert_int4_rtn(ulong4); +int4 __ovld __cnfn convert_int4_sat_rtn(ulong4); +int4 __ovld __cnfn convert_int4(ulong4); +int4 __ovld __cnfn convert_int4_sat(ulong4); +int4 __ovld __cnfn convert_int4_rte(float4); +int4 __ovld __cnfn convert_int4_sat_rte(float4); +int4 __ovld __cnfn convert_int4_rtz(float4); +int4 __ovld __cnfn convert_int4_sat_rtz(float4); +int4 __ovld __cnfn convert_int4_rtp(float4); +int4 __ovld __cnfn convert_int4_sat_rtp(float4); +int4 __ovld __cnfn convert_int4_rtn(float4); +int4 
__ovld __cnfn convert_int4_sat_rtn(float4); +int4 __ovld __cnfn convert_int4(float4); +int4 __ovld __cnfn convert_int4_sat(float4); +uint4 __ovld __cnfn convert_uint4_rte(char4); +uint4 __ovld __cnfn convert_uint4_sat_rte(char4); +uint4 __ovld __cnfn convert_uint4_rtz(char4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(char4); +uint4 __ovld __cnfn convert_uint4_rtp(char4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(char4); +uint4 __ovld __cnfn convert_uint4_rtn(char4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(char4); +uint4 __ovld __cnfn convert_uint4(char4); +uint4 __ovld __cnfn convert_uint4_sat(char4); +uint4 __ovld __cnfn convert_uint4_rte(uchar4); +uint4 __ovld __cnfn convert_uint4_sat_rte(uchar4); +uint4 __ovld __cnfn convert_uint4_rtz(uchar4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(uchar4); +uint4 __ovld __cnfn convert_uint4_rtp(uchar4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(uchar4); +uint4 __ovld __cnfn convert_uint4_rtn(uchar4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(uchar4); +uint4 __ovld __cnfn convert_uint4(uchar4); +uint4 __ovld __cnfn convert_uint4_sat(uchar4); +uint4 __ovld __cnfn convert_uint4_rte(short4); +uint4 __ovld __cnfn convert_uint4_sat_rte(short4); +uint4 __ovld __cnfn convert_uint4_rtz(short4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(short4); +uint4 __ovld __cnfn convert_uint4_rtp(short4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(short4); +uint4 __ovld __cnfn convert_uint4_rtn(short4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(short4); +uint4 __ovld __cnfn convert_uint4(short4); +uint4 __ovld __cnfn convert_uint4_sat(short4); +uint4 __ovld __cnfn convert_uint4_rte(ushort4); +uint4 __ovld __cnfn convert_uint4_sat_rte(ushort4); +uint4 __ovld __cnfn convert_uint4_rtz(ushort4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(ushort4); +uint4 __ovld __cnfn convert_uint4_rtp(ushort4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(ushort4); +uint4 __ovld __cnfn convert_uint4_rtn(ushort4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(ushort4); +uint4 __ovld __cnfn convert_uint4(ushort4); +uint4 __ovld __cnfn convert_uint4_sat(ushort4); +uint4 __ovld __cnfn convert_uint4_rte(int4); +uint4 __ovld __cnfn convert_uint4_sat_rte(int4); +uint4 __ovld __cnfn convert_uint4_rtz(int4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(int4); +uint4 __ovld __cnfn convert_uint4_rtp(int4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(int4); +uint4 __ovld __cnfn convert_uint4_rtn(int4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(int4); +uint4 __ovld __cnfn convert_uint4(int4); +uint4 __ovld __cnfn convert_uint4_sat(int4); +uint4 __ovld __cnfn convert_uint4_rte(uint4); +uint4 __ovld __cnfn convert_uint4_sat_rte(uint4); +uint4 __ovld __cnfn convert_uint4_rtz(uint4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(uint4); +uint4 __ovld __cnfn convert_uint4_rtp(uint4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(uint4); +uint4 __ovld __cnfn convert_uint4_rtn(uint4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(uint4); +uint4 __ovld __cnfn convert_uint4(uint4); +uint4 __ovld __cnfn convert_uint4_sat(uint4); +uint4 __ovld __cnfn convert_uint4_rte(long4); +uint4 __ovld __cnfn convert_uint4_sat_rte(long4); +uint4 __ovld __cnfn convert_uint4_rtz(long4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(long4); +uint4 __ovld __cnfn convert_uint4_rtp(long4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(long4); +uint4 __ovld __cnfn convert_uint4_rtn(long4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(long4); +uint4 __ovld __cnfn convert_uint4(long4); +uint4 __ovld __cnfn convert_uint4_sat(long4); +uint4 __ovld 
__cnfn convert_uint4_rte(ulong4); +uint4 __ovld __cnfn convert_uint4_sat_rte(ulong4); +uint4 __ovld __cnfn convert_uint4_rtz(ulong4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(ulong4); +uint4 __ovld __cnfn convert_uint4_rtp(ulong4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(ulong4); +uint4 __ovld __cnfn convert_uint4_rtn(ulong4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(ulong4); +uint4 __ovld __cnfn convert_uint4(ulong4); +uint4 __ovld __cnfn convert_uint4_sat(ulong4); +uint4 __ovld __cnfn convert_uint4_rte(float4); +uint4 __ovld __cnfn convert_uint4_sat_rte(float4); +uint4 __ovld __cnfn convert_uint4_rtz(float4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(float4); +uint4 __ovld __cnfn convert_uint4_rtp(float4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(float4); +uint4 __ovld __cnfn convert_uint4_rtn(float4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(float4); +uint4 __ovld __cnfn convert_uint4(float4); +uint4 __ovld __cnfn convert_uint4_sat(float4); +long4 __ovld __cnfn convert_long4_rte(char4); +long4 __ovld __cnfn convert_long4_sat_rte(char4); +long4 __ovld __cnfn convert_long4_rtz(char4); +long4 __ovld __cnfn convert_long4_sat_rtz(char4); +long4 __ovld __cnfn convert_long4_rtp(char4); +long4 __ovld __cnfn convert_long4_sat_rtp(char4); +long4 __ovld __cnfn convert_long4_rtn(char4); +long4 __ovld __cnfn convert_long4_sat_rtn(char4); +long4 __ovld __cnfn convert_long4(char4); +long4 __ovld __cnfn convert_long4_sat(char4); +long4 __ovld __cnfn convert_long4_rte(uchar4); +long4 __ovld __cnfn convert_long4_sat_rte(uchar4); +long4 __ovld __cnfn convert_long4_rtz(uchar4); +long4 __ovld __cnfn convert_long4_sat_rtz(uchar4); +long4 __ovld __cnfn convert_long4_rtp(uchar4); +long4 __ovld __cnfn convert_long4_sat_rtp(uchar4); +long4 __ovld __cnfn convert_long4_rtn(uchar4); +long4 __ovld __cnfn convert_long4_sat_rtn(uchar4); +long4 __ovld __cnfn convert_long4(uchar4); +long4 __ovld __cnfn convert_long4_sat(uchar4); +long4 __ovld __cnfn convert_long4_rte(short4); +long4 __ovld __cnfn convert_long4_sat_rte(short4); +long4 __ovld __cnfn convert_long4_rtz(short4); +long4 __ovld __cnfn convert_long4_sat_rtz(short4); +long4 __ovld __cnfn convert_long4_rtp(short4); +long4 __ovld __cnfn convert_long4_sat_rtp(short4); +long4 __ovld __cnfn convert_long4_rtn(short4); +long4 __ovld __cnfn convert_long4_sat_rtn(short4); +long4 __ovld __cnfn convert_long4(short4); +long4 __ovld __cnfn convert_long4_sat(short4); +long4 __ovld __cnfn convert_long4_rte(ushort4); +long4 __ovld __cnfn convert_long4_sat_rte(ushort4); +long4 __ovld __cnfn convert_long4_rtz(ushort4); +long4 __ovld __cnfn convert_long4_sat_rtz(ushort4); +long4 __ovld __cnfn convert_long4_rtp(ushort4); +long4 __ovld __cnfn convert_long4_sat_rtp(ushort4); +long4 __ovld __cnfn convert_long4_rtn(ushort4); +long4 __ovld __cnfn convert_long4_sat_rtn(ushort4); +long4 __ovld __cnfn convert_long4(ushort4); +long4 __ovld __cnfn convert_long4_sat(ushort4); +long4 __ovld __cnfn convert_long4_rte(int4); +long4 __ovld __cnfn convert_long4_sat_rte(int4); +long4 __ovld __cnfn convert_long4_rtz(int4); +long4 __ovld __cnfn convert_long4_sat_rtz(int4); +long4 __ovld __cnfn convert_long4_rtp(int4); +long4 __ovld __cnfn convert_long4_sat_rtp(int4); +long4 __ovld __cnfn convert_long4_rtn(int4); +long4 __ovld __cnfn convert_long4_sat_rtn(int4); +long4 __ovld __cnfn convert_long4(int4); +long4 __ovld __cnfn convert_long4_sat(int4); +long4 __ovld __cnfn convert_long4_rte(uint4); +long4 __ovld __cnfn convert_long4_sat_rte(uint4); +long4 __ovld __cnfn convert_long4_rtz(uint4); 
+long4 __ovld __cnfn convert_long4_sat_rtz(uint4); +long4 __ovld __cnfn convert_long4_rtp(uint4); +long4 __ovld __cnfn convert_long4_sat_rtp(uint4); +long4 __ovld __cnfn convert_long4_rtn(uint4); +long4 __ovld __cnfn convert_long4_sat_rtn(uint4); +long4 __ovld __cnfn convert_long4(uint4); +long4 __ovld __cnfn convert_long4_sat(uint4); +long4 __ovld __cnfn convert_long4_rte(long4); +long4 __ovld __cnfn convert_long4_sat_rte(long4); +long4 __ovld __cnfn convert_long4_rtz(long4); +long4 __ovld __cnfn convert_long4_sat_rtz(long4); +long4 __ovld __cnfn convert_long4_rtp(long4); +long4 __ovld __cnfn convert_long4_sat_rtp(long4); +long4 __ovld __cnfn convert_long4_rtn(long4); +long4 __ovld __cnfn convert_long4_sat_rtn(long4); +long4 __ovld __cnfn convert_long4(long4); +long4 __ovld __cnfn convert_long4_sat(long4); +long4 __ovld __cnfn convert_long4_rte(ulong4); +long4 __ovld __cnfn convert_long4_sat_rte(ulong4); +long4 __ovld __cnfn convert_long4_rtz(ulong4); +long4 __ovld __cnfn convert_long4_sat_rtz(ulong4); +long4 __ovld __cnfn convert_long4_rtp(ulong4); +long4 __ovld __cnfn convert_long4_sat_rtp(ulong4); +long4 __ovld __cnfn convert_long4_rtn(ulong4); +long4 __ovld __cnfn convert_long4_sat_rtn(ulong4); +long4 __ovld __cnfn convert_long4(ulong4); +long4 __ovld __cnfn convert_long4_sat(ulong4); +long4 __ovld __cnfn convert_long4_rte(float4); +long4 __ovld __cnfn convert_long4_sat_rte(float4); +long4 __ovld __cnfn convert_long4_rtz(float4); +long4 __ovld __cnfn convert_long4_sat_rtz(float4); +long4 __ovld __cnfn convert_long4_rtp(float4); +long4 __ovld __cnfn convert_long4_sat_rtp(float4); +long4 __ovld __cnfn convert_long4_rtn(float4); +long4 __ovld __cnfn convert_long4_sat_rtn(float4); +long4 __ovld __cnfn convert_long4(float4); +long4 __ovld __cnfn convert_long4_sat(float4); +ulong4 __ovld __cnfn convert_ulong4_rte(char4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4); +ulong4 __ovld __cnfn convert_ulong4_rtz(char4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4); +ulong4 __ovld __cnfn convert_ulong4_rtp(char4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4); +ulong4 __ovld __cnfn convert_ulong4_rtn(char4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4); +ulong4 __ovld __cnfn convert_ulong4(char4); +ulong4 __ovld __cnfn convert_ulong4_sat(char4); +ulong4 __ovld __cnfn convert_ulong4_rte(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(uchar4); +ulong4 __ovld __cnfn convert_ulong4_rtz(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uchar4); +ulong4 __ovld __cnfn convert_ulong4_rtp(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uchar4); +ulong4 __ovld __cnfn convert_ulong4_rtn(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uchar4); +ulong4 __ovld __cnfn convert_ulong4(uchar4); +ulong4 __ovld __cnfn convert_ulong4_sat(uchar4); +ulong4 __ovld __cnfn convert_ulong4_rte(short4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(short4); +ulong4 __ovld __cnfn convert_ulong4_rtz(short4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(short4); +ulong4 __ovld __cnfn convert_ulong4_rtp(short4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(short4); +ulong4 __ovld __cnfn convert_ulong4_rtn(short4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(short4); +ulong4 __ovld __cnfn convert_ulong4(short4); +ulong4 __ovld __cnfn convert_ulong4_sat(short4); +ulong4 __ovld __cnfn convert_ulong4_rte(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(ushort4); +ulong4 __ovld __cnfn convert_ulong4_rtz(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ushort4); +ulong4 
__ovld __cnfn convert_ulong4_rtp(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ushort4); +ulong4 __ovld __cnfn convert_ulong4_rtn(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ushort4); +ulong4 __ovld __cnfn convert_ulong4(ushort4); +ulong4 __ovld __cnfn convert_ulong4_sat(ushort4); +ulong4 __ovld __cnfn convert_ulong4_rte(int4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(int4); +ulong4 __ovld __cnfn convert_ulong4_rtz(int4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(int4); +ulong4 __ovld __cnfn convert_ulong4_rtp(int4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(int4); +ulong4 __ovld __cnfn convert_ulong4_rtn(int4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(int4); +ulong4 __ovld __cnfn convert_ulong4(int4); +ulong4 __ovld __cnfn convert_ulong4_sat(int4); +ulong4 __ovld __cnfn convert_ulong4_rte(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(uint4); +ulong4 __ovld __cnfn convert_ulong4_rtz(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(uint4); +ulong4 __ovld __cnfn convert_ulong4_rtp(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(uint4); +ulong4 __ovld __cnfn convert_ulong4_rtn(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(uint4); +ulong4 __ovld __cnfn convert_ulong4(uint4); +ulong4 __ovld __cnfn convert_ulong4_sat(uint4); +ulong4 __ovld __cnfn convert_ulong4_rte(long4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(long4); +ulong4 __ovld __cnfn convert_ulong4_rtz(long4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(long4); +ulong4 __ovld __cnfn convert_ulong4_rtp(long4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(long4); +ulong4 __ovld __cnfn convert_ulong4_rtn(long4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(long4); +ulong4 __ovld __cnfn convert_ulong4(long4); +ulong4 __ovld __cnfn convert_ulong4_sat(long4); +ulong4 __ovld __cnfn convert_ulong4_rte(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(ulong4); +ulong4 __ovld __cnfn convert_ulong4_rtz(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(ulong4); +ulong4 __ovld __cnfn convert_ulong4_rtp(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(ulong4); +ulong4 __ovld __cnfn convert_ulong4_rtn(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(ulong4); +ulong4 __ovld __cnfn convert_ulong4(ulong4); +ulong4 __ovld __cnfn convert_ulong4_sat(ulong4); +ulong4 __ovld __cnfn convert_ulong4_rte(float4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(float4); +ulong4 __ovld __cnfn convert_ulong4_rtz(float4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(float4); +ulong4 __ovld __cnfn convert_ulong4_rtp(float4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(float4); +ulong4 __ovld __cnfn convert_ulong4_rtn(float4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(float4); +ulong4 __ovld __cnfn convert_ulong4(float4); +ulong4 __ovld __cnfn convert_ulong4_sat(float4); +float4 __ovld __cnfn convert_float4_rte(char4); +float4 __ovld __cnfn convert_float4_rtz(char4); +float4 __ovld __cnfn convert_float4_rtp(char4); +float4 __ovld __cnfn convert_float4_rtn(char4); +float4 __ovld __cnfn convert_float4(char4); +float4 __ovld __cnfn convert_float4_rte(uchar4); +float4 __ovld __cnfn convert_float4_rtz(uchar4); +float4 __ovld __cnfn convert_float4_rtp(uchar4); +float4 __ovld __cnfn convert_float4_rtn(uchar4); +float4 __ovld __cnfn convert_float4(uchar4); +float4 __ovld __cnfn convert_float4_rte(short4); +float4 __ovld __cnfn convert_float4_rtz(short4); +float4 __ovld __cnfn convert_float4_rtp(short4); +float4 __ovld __cnfn convert_float4_rtn(short4); +float4 __ovld __cnfn convert_float4(short4); 
+float4 __ovld __cnfn convert_float4_rte(ushort4); +float4 __ovld __cnfn convert_float4_rtz(ushort4); +float4 __ovld __cnfn convert_float4_rtp(ushort4); +float4 __ovld __cnfn convert_float4_rtn(ushort4); +float4 __ovld __cnfn convert_float4(ushort4); +float4 __ovld __cnfn convert_float4_rte(int4); +float4 __ovld __cnfn convert_float4_rtz(int4); +float4 __ovld __cnfn convert_float4_rtp(int4); +float4 __ovld __cnfn convert_float4_rtn(int4); +float4 __ovld __cnfn convert_float4(int4); +float4 __ovld __cnfn convert_float4_rte(uint4); +float4 __ovld __cnfn convert_float4_rtz(uint4); +float4 __ovld __cnfn convert_float4_rtp(uint4); +float4 __ovld __cnfn convert_float4_rtn(uint4); +float4 __ovld __cnfn convert_float4(uint4); +float4 __ovld __cnfn convert_float4_rte(long4); +float4 __ovld __cnfn convert_float4_rtz(long4); +float4 __ovld __cnfn convert_float4_rtp(long4); +float4 __ovld __cnfn convert_float4_rtn(long4); +float4 __ovld __cnfn convert_float4(long4); +float4 __ovld __cnfn convert_float4_rte(ulong4); +float4 __ovld __cnfn convert_float4_rtz(ulong4); +float4 __ovld __cnfn convert_float4_rtp(ulong4); +float4 __ovld __cnfn convert_float4_rtn(ulong4); +float4 __ovld __cnfn convert_float4(ulong4); +float4 __ovld __cnfn convert_float4_rte(float4); +float4 __ovld __cnfn convert_float4_rtz(float4); +float4 __ovld __cnfn convert_float4_rtp(float4); +float4 __ovld __cnfn convert_float4_rtn(float4); +float4 __ovld __cnfn convert_float4(float4); +char8 __ovld __cnfn convert_char8_rte(char8); +char8 __ovld __cnfn convert_char8_sat_rte(char8); +char8 __ovld __cnfn convert_char8_rtz(char8); +char8 __ovld __cnfn convert_char8_sat_rtz(char8); +char8 __ovld __cnfn convert_char8_rtp(char8); +char8 __ovld __cnfn convert_char8_sat_rtp(char8); +char8 __ovld __cnfn convert_char8_rtn(char8); +char8 __ovld __cnfn convert_char8_sat_rtn(char8); +char8 __ovld __cnfn convert_char8(char8); +char8 __ovld __cnfn convert_char8_sat(char8); +char8 __ovld __cnfn convert_char8_rte(uchar8); +char8 __ovld __cnfn convert_char8_sat_rte(uchar8); +char8 __ovld __cnfn convert_char8_rtz(uchar8); +char8 __ovld __cnfn convert_char8_sat_rtz(uchar8); +char8 __ovld __cnfn convert_char8_rtp(uchar8); +char8 __ovld __cnfn convert_char8_sat_rtp(uchar8); +char8 __ovld __cnfn convert_char8_rtn(uchar8); +char8 __ovld __cnfn convert_char8_sat_rtn(uchar8); +char8 __ovld __cnfn convert_char8(uchar8); +char8 __ovld __cnfn convert_char8_sat(uchar8); +char8 __ovld __cnfn convert_char8_rte(short8); +char8 __ovld __cnfn convert_char8_sat_rte(short8); +char8 __ovld __cnfn convert_char8_rtz(short8); +char8 __ovld __cnfn convert_char8_sat_rtz(short8); +char8 __ovld __cnfn convert_char8_rtp(short8); +char8 __ovld __cnfn convert_char8_sat_rtp(short8); +char8 __ovld __cnfn convert_char8_rtn(short8); +char8 __ovld __cnfn convert_char8_sat_rtn(short8); +char8 __ovld __cnfn convert_char8(short8); +char8 __ovld __cnfn convert_char8_sat(short8); +char8 __ovld __cnfn convert_char8_rte(ushort8); +char8 __ovld __cnfn convert_char8_sat_rte(ushort8); +char8 __ovld __cnfn convert_char8_rtz(ushort8); +char8 __ovld __cnfn convert_char8_sat_rtz(ushort8); +char8 __ovld __cnfn convert_char8_rtp(ushort8); +char8 __ovld __cnfn convert_char8_sat_rtp(ushort8); +char8 __ovld __cnfn convert_char8_rtn(ushort8); +char8 __ovld __cnfn convert_char8_sat_rtn(ushort8); +char8 __ovld __cnfn convert_char8(ushort8); +char8 __ovld __cnfn convert_char8_sat(ushort8); +char8 __ovld __cnfn convert_char8_rte(int8); +char8 __ovld __cnfn convert_char8_sat_rte(int8); +char8 __ovld __cnfn 
convert_char8_rtz(int8); +char8 __ovld __cnfn convert_char8_sat_rtz(int8); +char8 __ovld __cnfn convert_char8_rtp(int8); +char8 __ovld __cnfn convert_char8_sat_rtp(int8); +char8 __ovld __cnfn convert_char8_rtn(int8); +char8 __ovld __cnfn convert_char8_sat_rtn(int8); +char8 __ovld __cnfn convert_char8(int8); +char8 __ovld __cnfn convert_char8_sat(int8); +char8 __ovld __cnfn convert_char8_rte(uint8); +char8 __ovld __cnfn convert_char8_sat_rte(uint8); +char8 __ovld __cnfn convert_char8_rtz(uint8); +char8 __ovld __cnfn convert_char8_sat_rtz(uint8); +char8 __ovld __cnfn convert_char8_rtp(uint8); +char8 __ovld __cnfn convert_char8_sat_rtp(uint8); +char8 __ovld __cnfn convert_char8_rtn(uint8); +char8 __ovld __cnfn convert_char8_sat_rtn(uint8); +char8 __ovld __cnfn convert_char8(uint8); +char8 __ovld __cnfn convert_char8_sat(uint8); +char8 __ovld __cnfn convert_char8_rte(long8); +char8 __ovld __cnfn convert_char8_sat_rte(long8); +char8 __ovld __cnfn convert_char8_rtz(long8); +char8 __ovld __cnfn convert_char8_sat_rtz(long8); +char8 __ovld __cnfn convert_char8_rtp(long8); +char8 __ovld __cnfn convert_char8_sat_rtp(long8); +char8 __ovld __cnfn convert_char8_rtn(long8); +char8 __ovld __cnfn convert_char8_sat_rtn(long8); +char8 __ovld __cnfn convert_char8(long8); +char8 __ovld __cnfn convert_char8_sat(long8); +char8 __ovld __cnfn convert_char8_rte(ulong8); +char8 __ovld __cnfn convert_char8_sat_rte(ulong8); +char8 __ovld __cnfn convert_char8_rtz(ulong8); +char8 __ovld __cnfn convert_char8_sat_rtz(ulong8); +char8 __ovld __cnfn convert_char8_rtp(ulong8); +char8 __ovld __cnfn convert_char8_sat_rtp(ulong8); +char8 __ovld __cnfn convert_char8_rtn(ulong8); +char8 __ovld __cnfn convert_char8_sat_rtn(ulong8); +char8 __ovld __cnfn convert_char8(ulong8); +char8 __ovld __cnfn convert_char8_sat(ulong8); +char8 __ovld __cnfn convert_char8_rte(float8); +char8 __ovld __cnfn convert_char8_sat_rte(float8); +char8 __ovld __cnfn convert_char8_rtz(float8); +char8 __ovld __cnfn convert_char8_sat_rtz(float8); +char8 __ovld __cnfn convert_char8_rtp(float8); +char8 __ovld __cnfn convert_char8_sat_rtp(float8); +char8 __ovld __cnfn convert_char8_rtn(float8); +char8 __ovld __cnfn convert_char8_sat_rtn(float8); +char8 __ovld __cnfn convert_char8(float8); +char8 __ovld __cnfn convert_char8_sat(float8); +uchar8 __ovld __cnfn convert_uchar8_rte(char8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8); +uchar8 __ovld __cnfn convert_uchar8_rtz(char8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8); +uchar8 __ovld __cnfn convert_uchar8_rtp(char8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8); +uchar8 __ovld __cnfn convert_uchar8_rtn(char8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8); +uchar8 __ovld __cnfn convert_uchar8(char8); +uchar8 __ovld __cnfn convert_uchar8_sat(char8); +uchar8 __ovld __cnfn convert_uchar8_rte(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(uchar8); +uchar8 __ovld __cnfn convert_uchar8_rtz(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uchar8); +uchar8 __ovld __cnfn convert_uchar8_rtp(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uchar8); +uchar8 __ovld __cnfn convert_uchar8_rtn(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uchar8); +uchar8 __ovld __cnfn convert_uchar8(uchar8); +uchar8 __ovld __cnfn convert_uchar8_sat(uchar8); +uchar8 __ovld __cnfn convert_uchar8_rte(short8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(short8); +uchar8 __ovld __cnfn convert_uchar8_rtz(short8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(short8); +uchar8 __ovld __cnfn 
convert_uchar8_rtp(short8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(short8); +uchar8 __ovld __cnfn convert_uchar8_rtn(short8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(short8); +uchar8 __ovld __cnfn convert_uchar8(short8); +uchar8 __ovld __cnfn convert_uchar8_sat(short8); +uchar8 __ovld __cnfn convert_uchar8_rte(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(ushort8); +uchar8 __ovld __cnfn convert_uchar8_rtz(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ushort8); +uchar8 __ovld __cnfn convert_uchar8_rtp(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ushort8); +uchar8 __ovld __cnfn convert_uchar8_rtn(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ushort8); +uchar8 __ovld __cnfn convert_uchar8(ushort8); +uchar8 __ovld __cnfn convert_uchar8_sat(ushort8); +uchar8 __ovld __cnfn convert_uchar8_rte(int8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(int8); +uchar8 __ovld __cnfn convert_uchar8_rtz(int8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(int8); +uchar8 __ovld __cnfn convert_uchar8_rtp(int8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(int8); +uchar8 __ovld __cnfn convert_uchar8_rtn(int8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(int8); +uchar8 __ovld __cnfn convert_uchar8(int8); +uchar8 __ovld __cnfn convert_uchar8_sat(int8); +uchar8 __ovld __cnfn convert_uchar8_rte(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(uint8); +uchar8 __ovld __cnfn convert_uchar8_rtz(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(uint8); +uchar8 __ovld __cnfn convert_uchar8_rtp(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(uint8); +uchar8 __ovld __cnfn convert_uchar8_rtn(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(uint8); +uchar8 __ovld __cnfn convert_uchar8(uint8); +uchar8 __ovld __cnfn convert_uchar8_sat(uint8); +uchar8 __ovld __cnfn convert_uchar8_rte(long8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(long8); +uchar8 __ovld __cnfn convert_uchar8_rtz(long8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(long8); +uchar8 __ovld __cnfn convert_uchar8_rtp(long8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(long8); +uchar8 __ovld __cnfn convert_uchar8_rtn(long8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(long8); +uchar8 __ovld __cnfn convert_uchar8(long8); +uchar8 __ovld __cnfn convert_uchar8_sat(long8); +uchar8 __ovld __cnfn convert_uchar8_rte(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(ulong8); +uchar8 __ovld __cnfn convert_uchar8_rtz(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(ulong8); +uchar8 __ovld __cnfn convert_uchar8_rtp(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(ulong8); +uchar8 __ovld __cnfn convert_uchar8_rtn(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(ulong8); +uchar8 __ovld __cnfn convert_uchar8(ulong8); +uchar8 __ovld __cnfn convert_uchar8_sat(ulong8); +uchar8 __ovld __cnfn convert_uchar8_rte(float8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(float8); +uchar8 __ovld __cnfn convert_uchar8_rtz(float8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(float8); +uchar8 __ovld __cnfn convert_uchar8_rtp(float8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(float8); +uchar8 __ovld __cnfn convert_uchar8_rtn(float8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(float8); +uchar8 __ovld __cnfn convert_uchar8(float8); +uchar8 __ovld __cnfn convert_uchar8_sat(float8); +short8 __ovld __cnfn convert_short8_rte(char8); +short8 __ovld __cnfn convert_short8_sat_rte(char8); +short8 __ovld __cnfn convert_short8_rtz(char8); +short8 __ovld __cnfn convert_short8_sat_rtz(char8); +short8 __ovld __cnfn 
convert_short8_rtp(char8); +short8 __ovld __cnfn convert_short8_sat_rtp(char8); +short8 __ovld __cnfn convert_short8_rtn(char8); +short8 __ovld __cnfn convert_short8_sat_rtn(char8); +short8 __ovld __cnfn convert_short8(char8); +short8 __ovld __cnfn convert_short8_sat(char8); +short8 __ovld __cnfn convert_short8_rte(uchar8); +short8 __ovld __cnfn convert_short8_sat_rte(uchar8); +short8 __ovld __cnfn convert_short8_rtz(uchar8); +short8 __ovld __cnfn convert_short8_sat_rtz(uchar8); +short8 __ovld __cnfn convert_short8_rtp(uchar8); +short8 __ovld __cnfn convert_short8_sat_rtp(uchar8); +short8 __ovld __cnfn convert_short8_rtn(uchar8); +short8 __ovld __cnfn convert_short8_sat_rtn(uchar8); +short8 __ovld __cnfn convert_short8(uchar8); +short8 __ovld __cnfn convert_short8_sat(uchar8); +short8 __ovld __cnfn convert_short8_rte(short8); +short8 __ovld __cnfn convert_short8_sat_rte(short8); +short8 __ovld __cnfn convert_short8_rtz(short8); +short8 __ovld __cnfn convert_short8_sat_rtz(short8); +short8 __ovld __cnfn convert_short8_rtp(short8); +short8 __ovld __cnfn convert_short8_sat_rtp(short8); +short8 __ovld __cnfn convert_short8_rtn(short8); +short8 __ovld __cnfn convert_short8_sat_rtn(short8); +short8 __ovld __cnfn convert_short8(short8); +short8 __ovld __cnfn convert_short8_sat(short8); +short8 __ovld __cnfn convert_short8_rte(ushort8); +short8 __ovld __cnfn convert_short8_sat_rte(ushort8); +short8 __ovld __cnfn convert_short8_rtz(ushort8); +short8 __ovld __cnfn convert_short8_sat_rtz(ushort8); +short8 __ovld __cnfn convert_short8_rtp(ushort8); +short8 __ovld __cnfn convert_short8_sat_rtp(ushort8); +short8 __ovld __cnfn convert_short8_rtn(ushort8); +short8 __ovld __cnfn convert_short8_sat_rtn(ushort8); +short8 __ovld __cnfn convert_short8(ushort8); +short8 __ovld __cnfn convert_short8_sat(ushort8); +short8 __ovld __cnfn convert_short8_rte(int8); +short8 __ovld __cnfn convert_short8_sat_rte(int8); +short8 __ovld __cnfn convert_short8_rtz(int8); +short8 __ovld __cnfn convert_short8_sat_rtz(int8); +short8 __ovld __cnfn convert_short8_rtp(int8); +short8 __ovld __cnfn convert_short8_sat_rtp(int8); +short8 __ovld __cnfn convert_short8_rtn(int8); +short8 __ovld __cnfn convert_short8_sat_rtn(int8); +short8 __ovld __cnfn convert_short8(int8); +short8 __ovld __cnfn convert_short8_sat(int8); +short8 __ovld __cnfn convert_short8_rte(uint8); +short8 __ovld __cnfn convert_short8_sat_rte(uint8); +short8 __ovld __cnfn convert_short8_rtz(uint8); +short8 __ovld __cnfn convert_short8_sat_rtz(uint8); +short8 __ovld __cnfn convert_short8_rtp(uint8); +short8 __ovld __cnfn convert_short8_sat_rtp(uint8); +short8 __ovld __cnfn convert_short8_rtn(uint8); +short8 __ovld __cnfn convert_short8_sat_rtn(uint8); +short8 __ovld __cnfn convert_short8(uint8); +short8 __ovld __cnfn convert_short8_sat(uint8); +short8 __ovld __cnfn convert_short8_rte(long8); +short8 __ovld __cnfn convert_short8_sat_rte(long8); +short8 __ovld __cnfn convert_short8_rtz(long8); +short8 __ovld __cnfn convert_short8_sat_rtz(long8); +short8 __ovld __cnfn convert_short8_rtp(long8); +short8 __ovld __cnfn convert_short8_sat_rtp(long8); +short8 __ovld __cnfn convert_short8_rtn(long8); +short8 __ovld __cnfn convert_short8_sat_rtn(long8); +short8 __ovld __cnfn convert_short8(long8); +short8 __ovld __cnfn convert_short8_sat(long8); +short8 __ovld __cnfn convert_short8_rte(ulong8); +short8 __ovld __cnfn convert_short8_sat_rte(ulong8); +short8 __ovld __cnfn convert_short8_rtz(ulong8); +short8 __ovld __cnfn convert_short8_sat_rtz(ulong8); +short8 __ovld __cnfn 
convert_short8_rtp(ulong8); +short8 __ovld __cnfn convert_short8_sat_rtp(ulong8); +short8 __ovld __cnfn convert_short8_rtn(ulong8); +short8 __ovld __cnfn convert_short8_sat_rtn(ulong8); +short8 __ovld __cnfn convert_short8(ulong8); +short8 __ovld __cnfn convert_short8_sat(ulong8); +short8 __ovld __cnfn convert_short8_rte(float8); +short8 __ovld __cnfn convert_short8_sat_rte(float8); +short8 __ovld __cnfn convert_short8_rtz(float8); +short8 __ovld __cnfn convert_short8_sat_rtz(float8); +short8 __ovld __cnfn convert_short8_rtp(float8); +short8 __ovld __cnfn convert_short8_sat_rtp(float8); +short8 __ovld __cnfn convert_short8_rtn(float8); +short8 __ovld __cnfn convert_short8_sat_rtn(float8); +short8 __ovld __cnfn convert_short8(float8); +short8 __ovld __cnfn convert_short8_sat(float8); +ushort8 __ovld __cnfn convert_ushort8_rte(char8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8); +ushort8 __ovld __cnfn convert_ushort8_rtz(char8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8); +ushort8 __ovld __cnfn convert_ushort8_rtp(char8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8); +ushort8 __ovld __cnfn convert_ushort8_rtn(char8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8); +ushort8 __ovld __cnfn convert_ushort8(char8); +ushort8 __ovld __cnfn convert_ushort8_sat(char8); +ushort8 __ovld __cnfn convert_ushort8_rte(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(uchar8); +ushort8 __ovld __cnfn convert_ushort8_rtz(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uchar8); +ushort8 __ovld __cnfn convert_ushort8_rtp(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uchar8); +ushort8 __ovld __cnfn convert_ushort8_rtn(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uchar8); +ushort8 __ovld __cnfn convert_ushort8(uchar8); +ushort8 __ovld __cnfn convert_ushort8_sat(uchar8); +ushort8 __ovld __cnfn convert_ushort8_rte(short8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(short8); +ushort8 __ovld __cnfn convert_ushort8_rtz(short8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(short8); +ushort8 __ovld __cnfn convert_ushort8_rtp(short8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(short8); +ushort8 __ovld __cnfn convert_ushort8_rtn(short8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(short8); +ushort8 __ovld __cnfn convert_ushort8(short8); +ushort8 __ovld __cnfn convert_ushort8_sat(short8); +ushort8 __ovld __cnfn convert_ushort8_rte(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(ushort8); +ushort8 __ovld __cnfn convert_ushort8_rtz(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ushort8); +ushort8 __ovld __cnfn convert_ushort8_rtp(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ushort8); +ushort8 __ovld __cnfn convert_ushort8_rtn(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ushort8); +ushort8 __ovld __cnfn convert_ushort8(ushort8); +ushort8 __ovld __cnfn convert_ushort8_sat(ushort8); +ushort8 __ovld __cnfn convert_ushort8_rte(int8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(int8); +ushort8 __ovld __cnfn convert_ushort8_rtz(int8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(int8); +ushort8 __ovld __cnfn convert_ushort8_rtp(int8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(int8); +ushort8 __ovld __cnfn convert_ushort8_rtn(int8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(int8); +ushort8 __ovld __cnfn convert_ushort8(int8); +ushort8 __ovld __cnfn convert_ushort8_sat(int8); +ushort8 __ovld __cnfn convert_ushort8_rte(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(uint8); +ushort8 
__ovld __cnfn convert_ushort8_rtz(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(uint8); +ushort8 __ovld __cnfn convert_ushort8_rtp(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(uint8); +ushort8 __ovld __cnfn convert_ushort8_rtn(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(uint8); +ushort8 __ovld __cnfn convert_ushort8(uint8); +ushort8 __ovld __cnfn convert_ushort8_sat(uint8); +ushort8 __ovld __cnfn convert_ushort8_rte(long8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(long8); +ushort8 __ovld __cnfn convert_ushort8_rtz(long8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(long8); +ushort8 __ovld __cnfn convert_ushort8_rtp(long8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(long8); +ushort8 __ovld __cnfn convert_ushort8_rtn(long8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(long8); +ushort8 __ovld __cnfn convert_ushort8(long8); +ushort8 __ovld __cnfn convert_ushort8_sat(long8); +ushort8 __ovld __cnfn convert_ushort8_rte(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(ulong8); +ushort8 __ovld __cnfn convert_ushort8_rtz(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(ulong8); +ushort8 __ovld __cnfn convert_ushort8_rtp(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(ulong8); +ushort8 __ovld __cnfn convert_ushort8_rtn(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(ulong8); +ushort8 __ovld __cnfn convert_ushort8(ulong8); +ushort8 __ovld __cnfn convert_ushort8_sat(ulong8); +ushort8 __ovld __cnfn convert_ushort8_rte(float8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(float8); +ushort8 __ovld __cnfn convert_ushort8_rtz(float8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(float8); +ushort8 __ovld __cnfn convert_ushort8_rtp(float8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(float8); +ushort8 __ovld __cnfn convert_ushort8_rtn(float8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(float8); +ushort8 __ovld __cnfn convert_ushort8(float8); +ushort8 __ovld __cnfn convert_ushort8_sat(float8); +int8 __ovld __cnfn convert_int8_rte(char8); +int8 __ovld __cnfn convert_int8_sat_rte(char8); +int8 __ovld __cnfn convert_int8_rtz(char8); +int8 __ovld __cnfn convert_int8_sat_rtz(char8); +int8 __ovld __cnfn convert_int8_rtp(char8); +int8 __ovld __cnfn convert_int8_sat_rtp(char8); +int8 __ovld __cnfn convert_int8_rtn(char8); +int8 __ovld __cnfn convert_int8_sat_rtn(char8); +int8 __ovld __cnfn convert_int8(char8); +int8 __ovld __cnfn convert_int8_sat(char8); +int8 __ovld __cnfn convert_int8_rte(uchar8); +int8 __ovld __cnfn convert_int8_sat_rte(uchar8); +int8 __ovld __cnfn convert_int8_rtz(uchar8); +int8 __ovld __cnfn convert_int8_sat_rtz(uchar8); +int8 __ovld __cnfn convert_int8_rtp(uchar8); +int8 __ovld __cnfn convert_int8_sat_rtp(uchar8); +int8 __ovld __cnfn convert_int8_rtn(uchar8); +int8 __ovld __cnfn convert_int8_sat_rtn(uchar8); +int8 __ovld __cnfn convert_int8(uchar8); +int8 __ovld __cnfn convert_int8_sat(uchar8); +int8 __ovld __cnfn convert_int8_rte(short8); +int8 __ovld __cnfn convert_int8_sat_rte(short8); +int8 __ovld __cnfn convert_int8_rtz(short8); +int8 __ovld __cnfn convert_int8_sat_rtz(short8); +int8 __ovld __cnfn convert_int8_rtp(short8); +int8 __ovld __cnfn convert_int8_sat_rtp(short8); +int8 __ovld __cnfn convert_int8_rtn(short8); +int8 __ovld __cnfn convert_int8_sat_rtn(short8); +int8 __ovld __cnfn convert_int8(short8); +int8 __ovld __cnfn convert_int8_sat(short8); +int8 __ovld __cnfn convert_int8_rte(ushort8); +int8 __ovld __cnfn convert_int8_sat_rte(ushort8); +int8 __ovld __cnfn convert_int8_rtz(ushort8); +int8 
__ovld __cnfn convert_int8_sat_rtz(ushort8); +int8 __ovld __cnfn convert_int8_rtp(ushort8); +int8 __ovld __cnfn convert_int8_sat_rtp(ushort8); +int8 __ovld __cnfn convert_int8_rtn(ushort8); +int8 __ovld __cnfn convert_int8_sat_rtn(ushort8); +int8 __ovld __cnfn convert_int8(ushort8); +int8 __ovld __cnfn convert_int8_sat(ushort8); +int8 __ovld __cnfn convert_int8_rte(int8); +int8 __ovld __cnfn convert_int8_sat_rte(int8); +int8 __ovld __cnfn convert_int8_rtz(int8); +int8 __ovld __cnfn convert_int8_sat_rtz(int8); +int8 __ovld __cnfn convert_int8_rtp(int8); +int8 __ovld __cnfn convert_int8_sat_rtp(int8); +int8 __ovld __cnfn convert_int8_rtn(int8); +int8 __ovld __cnfn convert_int8_sat_rtn(int8); +int8 __ovld __cnfn convert_int8(int8); +int8 __ovld __cnfn convert_int8_sat(int8); +int8 __ovld __cnfn convert_int8_rte(uint8); +int8 __ovld __cnfn convert_int8_sat_rte(uint8); +int8 __ovld __cnfn convert_int8_rtz(uint8); +int8 __ovld __cnfn convert_int8_sat_rtz(uint8); +int8 __ovld __cnfn convert_int8_rtp(uint8); +int8 __ovld __cnfn convert_int8_sat_rtp(uint8); +int8 __ovld __cnfn convert_int8_rtn(uint8); +int8 __ovld __cnfn convert_int8_sat_rtn(uint8); +int8 __ovld __cnfn convert_int8(uint8); +int8 __ovld __cnfn convert_int8_sat(uint8); +int8 __ovld __cnfn convert_int8_rte(long8); +int8 __ovld __cnfn convert_int8_sat_rte(long8); +int8 __ovld __cnfn convert_int8_rtz(long8); +int8 __ovld __cnfn convert_int8_sat_rtz(long8); +int8 __ovld __cnfn convert_int8_rtp(long8); +int8 __ovld __cnfn convert_int8_sat_rtp(long8); +int8 __ovld __cnfn convert_int8_rtn(long8); +int8 __ovld __cnfn convert_int8_sat_rtn(long8); +int8 __ovld __cnfn convert_int8(long8); +int8 __ovld __cnfn convert_int8_sat(long8); +int8 __ovld __cnfn convert_int8_rte(ulong8); +int8 __ovld __cnfn convert_int8_sat_rte(ulong8); +int8 __ovld __cnfn convert_int8_rtz(ulong8); +int8 __ovld __cnfn convert_int8_sat_rtz(ulong8); +int8 __ovld __cnfn convert_int8_rtp(ulong8); +int8 __ovld __cnfn convert_int8_sat_rtp(ulong8); +int8 __ovld __cnfn convert_int8_rtn(ulong8); +int8 __ovld __cnfn convert_int8_sat_rtn(ulong8); +int8 __ovld __cnfn convert_int8(ulong8); +int8 __ovld __cnfn convert_int8_sat(ulong8); +int8 __ovld __cnfn convert_int8_rte(float8); +int8 __ovld __cnfn convert_int8_sat_rte(float8); +int8 __ovld __cnfn convert_int8_rtz(float8); +int8 __ovld __cnfn convert_int8_sat_rtz(float8); +int8 __ovld __cnfn convert_int8_rtp(float8); +int8 __ovld __cnfn convert_int8_sat_rtp(float8); +int8 __ovld __cnfn convert_int8_rtn(float8); +int8 __ovld __cnfn convert_int8_sat_rtn(float8); +int8 __ovld __cnfn convert_int8(float8); +int8 __ovld __cnfn convert_int8_sat(float8); +uint8 __ovld __cnfn convert_uint8_rte(char8); +uint8 __ovld __cnfn convert_uint8_sat_rte(char8); +uint8 __ovld __cnfn convert_uint8_rtz(char8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(char8); +uint8 __ovld __cnfn convert_uint8_rtp(char8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(char8); +uint8 __ovld __cnfn convert_uint8_rtn(char8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(char8); +uint8 __ovld __cnfn convert_uint8(char8); +uint8 __ovld __cnfn convert_uint8_sat(char8); +uint8 __ovld __cnfn convert_uint8_rte(uchar8); +uint8 __ovld __cnfn convert_uint8_sat_rte(uchar8); +uint8 __ovld __cnfn convert_uint8_rtz(uchar8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(uchar8); +uint8 __ovld __cnfn convert_uint8_rtp(uchar8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(uchar8); +uint8 __ovld __cnfn convert_uint8_rtn(uchar8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(uchar8); +uint8 __ovld __cnfn 
convert_uint8(uchar8); +uint8 __ovld __cnfn convert_uint8_sat(uchar8); +uint8 __ovld __cnfn convert_uint8_rte(short8); +uint8 __ovld __cnfn convert_uint8_sat_rte(short8); +uint8 __ovld __cnfn convert_uint8_rtz(short8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(short8); +uint8 __ovld __cnfn convert_uint8_rtp(short8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(short8); +uint8 __ovld __cnfn convert_uint8_rtn(short8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(short8); +uint8 __ovld __cnfn convert_uint8(short8); +uint8 __ovld __cnfn convert_uint8_sat(short8); +uint8 __ovld __cnfn convert_uint8_rte(ushort8); +uint8 __ovld __cnfn convert_uint8_sat_rte(ushort8); +uint8 __ovld __cnfn convert_uint8_rtz(ushort8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(ushort8); +uint8 __ovld __cnfn convert_uint8_rtp(ushort8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(ushort8); +uint8 __ovld __cnfn convert_uint8_rtn(ushort8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(ushort8); +uint8 __ovld __cnfn convert_uint8(ushort8); +uint8 __ovld __cnfn convert_uint8_sat(ushort8); +uint8 __ovld __cnfn convert_uint8_rte(int8); +uint8 __ovld __cnfn convert_uint8_sat_rte(int8); +uint8 __ovld __cnfn convert_uint8_rtz(int8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(int8); +uint8 __ovld __cnfn convert_uint8_rtp(int8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(int8); +uint8 __ovld __cnfn convert_uint8_rtn(int8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(int8); +uint8 __ovld __cnfn convert_uint8(int8); +uint8 __ovld __cnfn convert_uint8_sat(int8); +uint8 __ovld __cnfn convert_uint8_rte(uint8); +uint8 __ovld __cnfn convert_uint8_sat_rte(uint8); +uint8 __ovld __cnfn convert_uint8_rtz(uint8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(uint8); +uint8 __ovld __cnfn convert_uint8_rtp(uint8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(uint8); +uint8 __ovld __cnfn convert_uint8_rtn(uint8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(uint8); +uint8 __ovld __cnfn convert_uint8(uint8); +uint8 __ovld __cnfn convert_uint8_sat(uint8); +uint8 __ovld __cnfn convert_uint8_rte(long8); +uint8 __ovld __cnfn convert_uint8_sat_rte(long8); +uint8 __ovld __cnfn convert_uint8_rtz(long8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(long8); +uint8 __ovld __cnfn convert_uint8_rtp(long8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(long8); +uint8 __ovld __cnfn convert_uint8_rtn(long8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(long8); +uint8 __ovld __cnfn convert_uint8(long8); +uint8 __ovld __cnfn convert_uint8_sat(long8); +uint8 __ovld __cnfn convert_uint8_rte(ulong8); +uint8 __ovld __cnfn convert_uint8_sat_rte(ulong8); +uint8 __ovld __cnfn convert_uint8_rtz(ulong8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(ulong8); +uint8 __ovld __cnfn convert_uint8_rtp(ulong8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(ulong8); +uint8 __ovld __cnfn convert_uint8_rtn(ulong8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(ulong8); +uint8 __ovld __cnfn convert_uint8(ulong8); +uint8 __ovld __cnfn convert_uint8_sat(ulong8); +uint8 __ovld __cnfn convert_uint8_rte(float8); +uint8 __ovld __cnfn convert_uint8_sat_rte(float8); +uint8 __ovld __cnfn convert_uint8_rtz(float8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(float8); +uint8 __ovld __cnfn convert_uint8_rtp(float8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(float8); +uint8 __ovld __cnfn convert_uint8_rtn(float8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(float8); +uint8 __ovld __cnfn convert_uint8(float8); +uint8 __ovld __cnfn convert_uint8_sat(float8); +long8 __ovld __cnfn convert_long8_rte(char8); +long8 __ovld __cnfn 
convert_long8_sat_rte(char8); +long8 __ovld __cnfn convert_long8_rtz(char8); +long8 __ovld __cnfn convert_long8_sat_rtz(char8); +long8 __ovld __cnfn convert_long8_rtp(char8); +long8 __ovld __cnfn convert_long8_sat_rtp(char8); +long8 __ovld __cnfn convert_long8_rtn(char8); +long8 __ovld __cnfn convert_long8_sat_rtn(char8); +long8 __ovld __cnfn convert_long8(char8); +long8 __ovld __cnfn convert_long8_sat(char8); +long8 __ovld __cnfn convert_long8_rte(uchar8); +long8 __ovld __cnfn convert_long8_sat_rte(uchar8); +long8 __ovld __cnfn convert_long8_rtz(uchar8); +long8 __ovld __cnfn convert_long8_sat_rtz(uchar8); +long8 __ovld __cnfn convert_long8_rtp(uchar8); +long8 __ovld __cnfn convert_long8_sat_rtp(uchar8); +long8 __ovld __cnfn convert_long8_rtn(uchar8); +long8 __ovld __cnfn convert_long8_sat_rtn(uchar8); +long8 __ovld __cnfn convert_long8(uchar8); +long8 __ovld __cnfn convert_long8_sat(uchar8); +long8 __ovld __cnfn convert_long8_rte(short8); +long8 __ovld __cnfn convert_long8_sat_rte(short8); +long8 __ovld __cnfn convert_long8_rtz(short8); +long8 __ovld __cnfn convert_long8_sat_rtz(short8); +long8 __ovld __cnfn convert_long8_rtp(short8); +long8 __ovld __cnfn convert_long8_sat_rtp(short8); +long8 __ovld __cnfn convert_long8_rtn(short8); +long8 __ovld __cnfn convert_long8_sat_rtn(short8); +long8 __ovld __cnfn convert_long8(short8); +long8 __ovld __cnfn convert_long8_sat(short8); +long8 __ovld __cnfn convert_long8_rte(ushort8); +long8 __ovld __cnfn convert_long8_sat_rte(ushort8); +long8 __ovld __cnfn convert_long8_rtz(ushort8); +long8 __ovld __cnfn convert_long8_sat_rtz(ushort8); +long8 __ovld __cnfn convert_long8_rtp(ushort8); +long8 __ovld __cnfn convert_long8_sat_rtp(ushort8); +long8 __ovld __cnfn convert_long8_rtn(ushort8); +long8 __ovld __cnfn convert_long8_sat_rtn(ushort8); +long8 __ovld __cnfn convert_long8(ushort8); +long8 __ovld __cnfn convert_long8_sat(ushort8); +long8 __ovld __cnfn convert_long8_rte(int8); +long8 __ovld __cnfn convert_long8_sat_rte(int8); +long8 __ovld __cnfn convert_long8_rtz(int8); +long8 __ovld __cnfn convert_long8_sat_rtz(int8); +long8 __ovld __cnfn convert_long8_rtp(int8); +long8 __ovld __cnfn convert_long8_sat_rtp(int8); +long8 __ovld __cnfn convert_long8_rtn(int8); +long8 __ovld __cnfn convert_long8_sat_rtn(int8); +long8 __ovld __cnfn convert_long8(int8); +long8 __ovld __cnfn convert_long8_sat(int8); +long8 __ovld __cnfn convert_long8_rte(uint8); +long8 __ovld __cnfn convert_long8_sat_rte(uint8); +long8 __ovld __cnfn convert_long8_rtz(uint8); +long8 __ovld __cnfn convert_long8_sat_rtz(uint8); +long8 __ovld __cnfn convert_long8_rtp(uint8); +long8 __ovld __cnfn convert_long8_sat_rtp(uint8); +long8 __ovld __cnfn convert_long8_rtn(uint8); +long8 __ovld __cnfn convert_long8_sat_rtn(uint8); +long8 __ovld __cnfn convert_long8(uint8); +long8 __ovld __cnfn convert_long8_sat(uint8); +long8 __ovld __cnfn convert_long8_rte(long8); +long8 __ovld __cnfn convert_long8_sat_rte(long8); +long8 __ovld __cnfn convert_long8_rtz(long8); +long8 __ovld __cnfn convert_long8_sat_rtz(long8); +long8 __ovld __cnfn convert_long8_rtp(long8); +long8 __ovld __cnfn convert_long8_sat_rtp(long8); +long8 __ovld __cnfn convert_long8_rtn(long8); +long8 __ovld __cnfn convert_long8_sat_rtn(long8); +long8 __ovld __cnfn convert_long8(long8); +long8 __ovld __cnfn convert_long8_sat(long8); +long8 __ovld __cnfn convert_long8_rte(ulong8); +long8 __ovld __cnfn convert_long8_sat_rte(ulong8); +long8 __ovld __cnfn convert_long8_rtz(ulong8); +long8 __ovld __cnfn convert_long8_sat_rtz(ulong8); +long8 __ovld 
__cnfn convert_long8_rtp(ulong8); +long8 __ovld __cnfn convert_long8_sat_rtp(ulong8); +long8 __ovld __cnfn convert_long8_rtn(ulong8); +long8 __ovld __cnfn convert_long8_sat_rtn(ulong8); +long8 __ovld __cnfn convert_long8(ulong8); +long8 __ovld __cnfn convert_long8_sat(ulong8); +long8 __ovld __cnfn convert_long8_rte(float8); +long8 __ovld __cnfn convert_long8_sat_rte(float8); +long8 __ovld __cnfn convert_long8_rtz(float8); +long8 __ovld __cnfn convert_long8_sat_rtz(float8); +long8 __ovld __cnfn convert_long8_rtp(float8); +long8 __ovld __cnfn convert_long8_sat_rtp(float8); +long8 __ovld __cnfn convert_long8_rtn(float8); +long8 __ovld __cnfn convert_long8_sat_rtn(float8); +long8 __ovld __cnfn convert_long8(float8); +long8 __ovld __cnfn convert_long8_sat(float8); +ulong8 __ovld __cnfn convert_ulong8_rte(char8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8); +ulong8 __ovld __cnfn convert_ulong8_rtz(char8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8); +ulong8 __ovld __cnfn convert_ulong8_rtp(char8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8); +ulong8 __ovld __cnfn convert_ulong8_rtn(char8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8); +ulong8 __ovld __cnfn convert_ulong8(char8); +ulong8 __ovld __cnfn convert_ulong8_sat(char8); +ulong8 __ovld __cnfn convert_ulong8_rte(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(uchar8); +ulong8 __ovld __cnfn convert_ulong8_rtz(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uchar8); +ulong8 __ovld __cnfn convert_ulong8_rtp(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uchar8); +ulong8 __ovld __cnfn convert_ulong8_rtn(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uchar8); +ulong8 __ovld __cnfn convert_ulong8(uchar8); +ulong8 __ovld __cnfn convert_ulong8_sat(uchar8); +ulong8 __ovld __cnfn convert_ulong8_rte(short8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(short8); +ulong8 __ovld __cnfn convert_ulong8_rtz(short8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(short8); +ulong8 __ovld __cnfn convert_ulong8_rtp(short8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(short8); +ulong8 __ovld __cnfn convert_ulong8_rtn(short8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(short8); +ulong8 __ovld __cnfn convert_ulong8(short8); +ulong8 __ovld __cnfn convert_ulong8_sat(short8); +ulong8 __ovld __cnfn convert_ulong8_rte(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(ushort8); +ulong8 __ovld __cnfn convert_ulong8_rtz(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ushort8); +ulong8 __ovld __cnfn convert_ulong8_rtp(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ushort8); +ulong8 __ovld __cnfn convert_ulong8_rtn(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ushort8); +ulong8 __ovld __cnfn convert_ulong8(ushort8); +ulong8 __ovld __cnfn convert_ulong8_sat(ushort8); +ulong8 __ovld __cnfn convert_ulong8_rte(int8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(int8); +ulong8 __ovld __cnfn convert_ulong8_rtz(int8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(int8); +ulong8 __ovld __cnfn convert_ulong8_rtp(int8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(int8); +ulong8 __ovld __cnfn convert_ulong8_rtn(int8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(int8); +ulong8 __ovld __cnfn convert_ulong8(int8); +ulong8 __ovld __cnfn convert_ulong8_sat(int8); +ulong8 __ovld __cnfn convert_ulong8_rte(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(uint8); +ulong8 __ovld __cnfn convert_ulong8_rtz(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(uint8); +ulong8 __ovld __cnfn 
convert_ulong8_rtp(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(uint8); +ulong8 __ovld __cnfn convert_ulong8_rtn(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(uint8); +ulong8 __ovld __cnfn convert_ulong8(uint8); +ulong8 __ovld __cnfn convert_ulong8_sat(uint8); +ulong8 __ovld __cnfn convert_ulong8_rte(long8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(long8); +ulong8 __ovld __cnfn convert_ulong8_rtz(long8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(long8); +ulong8 __ovld __cnfn convert_ulong8_rtp(long8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(long8); +ulong8 __ovld __cnfn convert_ulong8_rtn(long8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(long8); +ulong8 __ovld __cnfn convert_ulong8(long8); +ulong8 __ovld __cnfn convert_ulong8_sat(long8); +ulong8 __ovld __cnfn convert_ulong8_rte(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(ulong8); +ulong8 __ovld __cnfn convert_ulong8_rtz(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(ulong8); +ulong8 __ovld __cnfn convert_ulong8_rtp(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(ulong8); +ulong8 __ovld __cnfn convert_ulong8_rtn(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(ulong8); +ulong8 __ovld __cnfn convert_ulong8(ulong8); +ulong8 __ovld __cnfn convert_ulong8_sat(ulong8); +ulong8 __ovld __cnfn convert_ulong8_rte(float8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(float8); +ulong8 __ovld __cnfn convert_ulong8_rtz(float8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(float8); +ulong8 __ovld __cnfn convert_ulong8_rtp(float8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(float8); +ulong8 __ovld __cnfn convert_ulong8_rtn(float8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(float8); +ulong8 __ovld __cnfn convert_ulong8(float8); +ulong8 __ovld __cnfn convert_ulong8_sat(float8); +float8 __ovld __cnfn convert_float8_rte(char8); +float8 __ovld __cnfn convert_float8_rtz(char8); +float8 __ovld __cnfn convert_float8_rtp(char8); +float8 __ovld __cnfn convert_float8_rtn(char8); +float8 __ovld __cnfn convert_float8(char8); +float8 __ovld __cnfn convert_float8_rte(uchar8); +float8 __ovld __cnfn convert_float8_rtz(uchar8); +float8 __ovld __cnfn convert_float8_rtp(uchar8); +float8 __ovld __cnfn convert_float8_rtn(uchar8); +float8 __ovld __cnfn convert_float8(uchar8); +float8 __ovld __cnfn convert_float8_rte(short8); +float8 __ovld __cnfn convert_float8_rtz(short8); +float8 __ovld __cnfn convert_float8_rtp(short8); +float8 __ovld __cnfn convert_float8_rtn(short8); +float8 __ovld __cnfn convert_float8(short8); +float8 __ovld __cnfn convert_float8_rte(ushort8); +float8 __ovld __cnfn convert_float8_rtz(ushort8); +float8 __ovld __cnfn convert_float8_rtp(ushort8); +float8 __ovld __cnfn convert_float8_rtn(ushort8); +float8 __ovld __cnfn convert_float8(ushort8); +float8 __ovld __cnfn convert_float8_rte(int8); +float8 __ovld __cnfn convert_float8_rtz(int8); +float8 __ovld __cnfn convert_float8_rtp(int8); +float8 __ovld __cnfn convert_float8_rtn(int8); +float8 __ovld __cnfn convert_float8(int8); +float8 __ovld __cnfn convert_float8_rte(uint8); +float8 __ovld __cnfn convert_float8_rtz(uint8); +float8 __ovld __cnfn convert_float8_rtp(uint8); +float8 __ovld __cnfn convert_float8_rtn(uint8); +float8 __ovld __cnfn convert_float8(uint8); +float8 __ovld __cnfn convert_float8_rte(long8); +float8 __ovld __cnfn convert_float8_rtz(long8); +float8 __ovld __cnfn convert_float8_rtp(long8); +float8 __ovld __cnfn convert_float8_rtn(long8); +float8 __ovld __cnfn convert_float8(long8); +float8 __ovld __cnfn convert_float8_rte(ulong8); 
+float8 __ovld __cnfn convert_float8_rtz(ulong8); +float8 __ovld __cnfn convert_float8_rtp(ulong8); +float8 __ovld __cnfn convert_float8_rtn(ulong8); +float8 __ovld __cnfn convert_float8(ulong8); +float8 __ovld __cnfn convert_float8_rte(float8); +float8 __ovld __cnfn convert_float8_rtz(float8); +float8 __ovld __cnfn convert_float8_rtp(float8); +float8 __ovld __cnfn convert_float8_rtn(float8); +float8 __ovld __cnfn convert_float8(float8); +char16 __ovld __cnfn convert_char16_rte(char16); +char16 __ovld __cnfn convert_char16_sat_rte(char16); +char16 __ovld __cnfn convert_char16_rtz(char16); +char16 __ovld __cnfn convert_char16_sat_rtz(char16); +char16 __ovld __cnfn convert_char16_rtp(char16); +char16 __ovld __cnfn convert_char16_sat_rtp(char16); +char16 __ovld __cnfn convert_char16_rtn(char16); +char16 __ovld __cnfn convert_char16_sat_rtn(char16); +char16 __ovld __cnfn convert_char16(char16); +char16 __ovld __cnfn convert_char16_sat(char16); +char16 __ovld __cnfn convert_char16_rte(uchar16); +char16 __ovld __cnfn convert_char16_sat_rte(uchar16); +char16 __ovld __cnfn convert_char16_rtz(uchar16); +char16 __ovld __cnfn convert_char16_sat_rtz(uchar16); +char16 __ovld __cnfn convert_char16_rtp(uchar16); +char16 __ovld __cnfn convert_char16_sat_rtp(uchar16); +char16 __ovld __cnfn convert_char16_rtn(uchar16); +char16 __ovld __cnfn convert_char16_sat_rtn(uchar16); +char16 __ovld __cnfn convert_char16(uchar16); +char16 __ovld __cnfn convert_char16_sat(uchar16); +char16 __ovld __cnfn convert_char16_rte(short16); +char16 __ovld __cnfn convert_char16_sat_rte(short16); +char16 __ovld __cnfn convert_char16_rtz(short16); +char16 __ovld __cnfn convert_char16_sat_rtz(short16); +char16 __ovld __cnfn convert_char16_rtp(short16); +char16 __ovld __cnfn convert_char16_sat_rtp(short16); +char16 __ovld __cnfn convert_char16_rtn(short16); +char16 __ovld __cnfn convert_char16_sat_rtn(short16); +char16 __ovld __cnfn convert_char16(short16); +char16 __ovld __cnfn convert_char16_sat(short16); +char16 __ovld __cnfn convert_char16_rte(ushort16); +char16 __ovld __cnfn convert_char16_sat_rte(ushort16); +char16 __ovld __cnfn convert_char16_rtz(ushort16); +char16 __ovld __cnfn convert_char16_sat_rtz(ushort16); +char16 __ovld __cnfn convert_char16_rtp(ushort16); +char16 __ovld __cnfn convert_char16_sat_rtp(ushort16); +char16 __ovld __cnfn convert_char16_rtn(ushort16); +char16 __ovld __cnfn convert_char16_sat_rtn(ushort16); +char16 __ovld __cnfn convert_char16(ushort16); +char16 __ovld __cnfn convert_char16_sat(ushort16); +char16 __ovld __cnfn convert_char16_rte(int16); +char16 __ovld __cnfn convert_char16_sat_rte(int16); +char16 __ovld __cnfn convert_char16_rtz(int16); +char16 __ovld __cnfn convert_char16_sat_rtz(int16); +char16 __ovld __cnfn convert_char16_rtp(int16); +char16 __ovld __cnfn convert_char16_sat_rtp(int16); +char16 __ovld __cnfn convert_char16_rtn(int16); +char16 __ovld __cnfn convert_char16_sat_rtn(int16); +char16 __ovld __cnfn convert_char16(int16); +char16 __ovld __cnfn convert_char16_sat(int16); +char16 __ovld __cnfn convert_char16_rte(uint16); +char16 __ovld __cnfn convert_char16_sat_rte(uint16); +char16 __ovld __cnfn convert_char16_rtz(uint16); +char16 __ovld __cnfn convert_char16_sat_rtz(uint16); +char16 __ovld __cnfn convert_char16_rtp(uint16); +char16 __ovld __cnfn convert_char16_sat_rtp(uint16); +char16 __ovld __cnfn convert_char16_rtn(uint16); +char16 __ovld __cnfn convert_char16_sat_rtn(uint16); +char16 __ovld __cnfn convert_char16(uint16); +char16 __ovld __cnfn convert_char16_sat(uint16); +char16 
__ovld __cnfn convert_char16_rte(long16); +char16 __ovld __cnfn convert_char16_sat_rte(long16); +char16 __ovld __cnfn convert_char16_rtz(long16); +char16 __ovld __cnfn convert_char16_sat_rtz(long16); +char16 __ovld __cnfn convert_char16_rtp(long16); +char16 __ovld __cnfn convert_char16_sat_rtp(long16); +char16 __ovld __cnfn convert_char16_rtn(long16); +char16 __ovld __cnfn convert_char16_sat_rtn(long16); +char16 __ovld __cnfn convert_char16(long16); +char16 __ovld __cnfn convert_char16_sat(long16); +char16 __ovld __cnfn convert_char16_rte(ulong16); +char16 __ovld __cnfn convert_char16_sat_rte(ulong16); +char16 __ovld __cnfn convert_char16_rtz(ulong16); +char16 __ovld __cnfn convert_char16_sat_rtz(ulong16); +char16 __ovld __cnfn convert_char16_rtp(ulong16); +char16 __ovld __cnfn convert_char16_sat_rtp(ulong16); +char16 __ovld __cnfn convert_char16_rtn(ulong16); +char16 __ovld __cnfn convert_char16_sat_rtn(ulong16); +char16 __ovld __cnfn convert_char16(ulong16); +char16 __ovld __cnfn convert_char16_sat(ulong16); +char16 __ovld __cnfn convert_char16_rte(float16); +char16 __ovld __cnfn convert_char16_sat_rte(float16); +char16 __ovld __cnfn convert_char16_rtz(float16); +char16 __ovld __cnfn convert_char16_sat_rtz(float16); +char16 __ovld __cnfn convert_char16_rtp(float16); +char16 __ovld __cnfn convert_char16_sat_rtp(float16); +char16 __ovld __cnfn convert_char16_rtn(float16); +char16 __ovld __cnfn convert_char16_sat_rtn(float16); +char16 __ovld __cnfn convert_char16(float16); +char16 __ovld __cnfn convert_char16_sat(float16); +uchar16 __ovld __cnfn convert_uchar16_rte(char16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16); +uchar16 __ovld __cnfn convert_uchar16_rtz(char16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16); +uchar16 __ovld __cnfn convert_uchar16_rtp(char16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16); +uchar16 __ovld __cnfn convert_uchar16_rtn(char16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16); +uchar16 __ovld __cnfn convert_uchar16(char16); +uchar16 __ovld __cnfn convert_uchar16_sat(char16); +uchar16 __ovld __cnfn convert_uchar16_rte(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(uchar16); +uchar16 __ovld __cnfn convert_uchar16_rtz(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uchar16); +uchar16 __ovld __cnfn convert_uchar16_rtp(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uchar16); +uchar16 __ovld __cnfn convert_uchar16_rtn(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uchar16); +uchar16 __ovld __cnfn convert_uchar16(uchar16); +uchar16 __ovld __cnfn convert_uchar16_sat(uchar16); +uchar16 __ovld __cnfn convert_uchar16_rte(short16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(short16); +uchar16 __ovld __cnfn convert_uchar16_rtz(short16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(short16); +uchar16 __ovld __cnfn convert_uchar16_rtp(short16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(short16); +uchar16 __ovld __cnfn convert_uchar16_rtn(short16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(short16); +uchar16 __ovld __cnfn convert_uchar16(short16); +uchar16 __ovld __cnfn convert_uchar16_sat(short16); +uchar16 __ovld __cnfn convert_uchar16_rte(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(ushort16); +uchar16 __ovld __cnfn convert_uchar16_rtz(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ushort16); +uchar16 __ovld __cnfn convert_uchar16_rtp(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ushort16); +uchar16 __ovld __cnfn convert_uchar16_rtn(ushort16); 
+uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ushort16); +uchar16 __ovld __cnfn convert_uchar16(ushort16); +uchar16 __ovld __cnfn convert_uchar16_sat(ushort16); +uchar16 __ovld __cnfn convert_uchar16_rte(int16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(int16); +uchar16 __ovld __cnfn convert_uchar16_rtz(int16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(int16); +uchar16 __ovld __cnfn convert_uchar16_rtp(int16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(int16); +uchar16 __ovld __cnfn convert_uchar16_rtn(int16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(int16); +uchar16 __ovld __cnfn convert_uchar16(int16); +uchar16 __ovld __cnfn convert_uchar16_sat(int16); +uchar16 __ovld __cnfn convert_uchar16_rte(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(uint16); +uchar16 __ovld __cnfn convert_uchar16_rtz(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(uint16); +uchar16 __ovld __cnfn convert_uchar16_rtp(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(uint16); +uchar16 __ovld __cnfn convert_uchar16_rtn(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(uint16); +uchar16 __ovld __cnfn convert_uchar16(uint16); +uchar16 __ovld __cnfn convert_uchar16_sat(uint16); +uchar16 __ovld __cnfn convert_uchar16_rte(long16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(long16); +uchar16 __ovld __cnfn convert_uchar16_rtz(long16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(long16); +uchar16 __ovld __cnfn convert_uchar16_rtp(long16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(long16); +uchar16 __ovld __cnfn convert_uchar16_rtn(long16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(long16); +uchar16 __ovld __cnfn convert_uchar16(long16); +uchar16 __ovld __cnfn convert_uchar16_sat(long16); +uchar16 __ovld __cnfn convert_uchar16_rte(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(ulong16); +uchar16 __ovld __cnfn convert_uchar16_rtz(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(ulong16); +uchar16 __ovld __cnfn convert_uchar16_rtp(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(ulong16); +uchar16 __ovld __cnfn convert_uchar16_rtn(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(ulong16); +uchar16 __ovld __cnfn convert_uchar16(ulong16); +uchar16 __ovld __cnfn convert_uchar16_sat(ulong16); +uchar16 __ovld __cnfn convert_uchar16_rte(float16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(float16); +uchar16 __ovld __cnfn convert_uchar16_rtz(float16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(float16); +uchar16 __ovld __cnfn convert_uchar16_rtp(float16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(float16); +uchar16 __ovld __cnfn convert_uchar16_rtn(float16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(float16); +uchar16 __ovld __cnfn convert_uchar16(float16); +uchar16 __ovld __cnfn convert_uchar16_sat(float16); +short16 __ovld __cnfn convert_short16_rte(char16); +short16 __ovld __cnfn convert_short16_sat_rte(char16); +short16 __ovld __cnfn convert_short16_rtz(char16); +short16 __ovld __cnfn convert_short16_sat_rtz(char16); +short16 __ovld __cnfn convert_short16_rtp(char16); +short16 __ovld __cnfn convert_short16_sat_rtp(char16); +short16 __ovld __cnfn convert_short16_rtn(char16); +short16 __ovld __cnfn convert_short16_sat_rtn(char16); +short16 __ovld __cnfn convert_short16(char16); +short16 __ovld __cnfn convert_short16_sat(char16); +short16 __ovld __cnfn convert_short16_rte(uchar16); +short16 __ovld __cnfn convert_short16_sat_rte(uchar16); +short16 __ovld __cnfn convert_short16_rtz(uchar16); +short16 __ovld __cnfn 
convert_short16_sat_rtz(uchar16); +short16 __ovld __cnfn convert_short16_rtp(uchar16); +short16 __ovld __cnfn convert_short16_sat_rtp(uchar16); +short16 __ovld __cnfn convert_short16_rtn(uchar16); +short16 __ovld __cnfn convert_short16_sat_rtn(uchar16); +short16 __ovld __cnfn convert_short16(uchar16); +short16 __ovld __cnfn convert_short16_sat(uchar16); +short16 __ovld __cnfn convert_short16_rte(short16); +short16 __ovld __cnfn convert_short16_sat_rte(short16); +short16 __ovld __cnfn convert_short16_rtz(short16); +short16 __ovld __cnfn convert_short16_sat_rtz(short16); +short16 __ovld __cnfn convert_short16_rtp(short16); +short16 __ovld __cnfn convert_short16_sat_rtp(short16); +short16 __ovld __cnfn convert_short16_rtn(short16); +short16 __ovld __cnfn convert_short16_sat_rtn(short16); +short16 __ovld __cnfn convert_short16(short16); +short16 __ovld __cnfn convert_short16_sat(short16); +short16 __ovld __cnfn convert_short16_rte(ushort16); +short16 __ovld __cnfn convert_short16_sat_rte(ushort16); +short16 __ovld __cnfn convert_short16_rtz(ushort16); +short16 __ovld __cnfn convert_short16_sat_rtz(ushort16); +short16 __ovld __cnfn convert_short16_rtp(ushort16); +short16 __ovld __cnfn convert_short16_sat_rtp(ushort16); +short16 __ovld __cnfn convert_short16_rtn(ushort16); +short16 __ovld __cnfn convert_short16_sat_rtn(ushort16); +short16 __ovld __cnfn convert_short16(ushort16); +short16 __ovld __cnfn convert_short16_sat(ushort16); +short16 __ovld __cnfn convert_short16_rte(int16); +short16 __ovld __cnfn convert_short16_sat_rte(int16); +short16 __ovld __cnfn convert_short16_rtz(int16); +short16 __ovld __cnfn convert_short16_sat_rtz(int16); +short16 __ovld __cnfn convert_short16_rtp(int16); +short16 __ovld __cnfn convert_short16_sat_rtp(int16); +short16 __ovld __cnfn convert_short16_rtn(int16); +short16 __ovld __cnfn convert_short16_sat_rtn(int16); +short16 __ovld __cnfn convert_short16(int16); +short16 __ovld __cnfn convert_short16_sat(int16); +short16 __ovld __cnfn convert_short16_rte(uint16); +short16 __ovld __cnfn convert_short16_sat_rte(uint16); +short16 __ovld __cnfn convert_short16_rtz(uint16); +short16 __ovld __cnfn convert_short16_sat_rtz(uint16); +short16 __ovld __cnfn convert_short16_rtp(uint16); +short16 __ovld __cnfn convert_short16_sat_rtp(uint16); +short16 __ovld __cnfn convert_short16_rtn(uint16); +short16 __ovld __cnfn convert_short16_sat_rtn(uint16); +short16 __ovld __cnfn convert_short16(uint16); +short16 __ovld __cnfn convert_short16_sat(uint16); +short16 __ovld __cnfn convert_short16_rte(long16); +short16 __ovld __cnfn convert_short16_sat_rte(long16); +short16 __ovld __cnfn convert_short16_rtz(long16); +short16 __ovld __cnfn convert_short16_sat_rtz(long16); +short16 __ovld __cnfn convert_short16_rtp(long16); +short16 __ovld __cnfn convert_short16_sat_rtp(long16); +short16 __ovld __cnfn convert_short16_rtn(long16); +short16 __ovld __cnfn convert_short16_sat_rtn(long16); +short16 __ovld __cnfn convert_short16(long16); +short16 __ovld __cnfn convert_short16_sat(long16); +short16 __ovld __cnfn convert_short16_rte(ulong16); +short16 __ovld __cnfn convert_short16_sat_rte(ulong16); +short16 __ovld __cnfn convert_short16_rtz(ulong16); +short16 __ovld __cnfn convert_short16_sat_rtz(ulong16); +short16 __ovld __cnfn convert_short16_rtp(ulong16); +short16 __ovld __cnfn convert_short16_sat_rtp(ulong16); +short16 __ovld __cnfn convert_short16_rtn(ulong16); +short16 __ovld __cnfn convert_short16_sat_rtn(ulong16); +short16 __ovld __cnfn convert_short16(ulong16); +short16 __ovld __cnfn 
convert_short16_sat(ulong16); +short16 __ovld __cnfn convert_short16_rte(float16); +short16 __ovld __cnfn convert_short16_sat_rte(float16); +short16 __ovld __cnfn convert_short16_rtz(float16); +short16 __ovld __cnfn convert_short16_sat_rtz(float16); +short16 __ovld __cnfn convert_short16_rtp(float16); +short16 __ovld __cnfn convert_short16_sat_rtp(float16); +short16 __ovld __cnfn convert_short16_rtn(float16); +short16 __ovld __cnfn convert_short16_sat_rtn(float16); +short16 __ovld __cnfn convert_short16(float16); +short16 __ovld __cnfn convert_short16_sat(float16); +ushort16 __ovld __cnfn convert_ushort16_rte(char16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16); +ushort16 __ovld __cnfn convert_ushort16_rtz(char16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16); +ushort16 __ovld __cnfn convert_ushort16_rtp(char16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16); +ushort16 __ovld __cnfn convert_ushort16_rtn(char16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16); +ushort16 __ovld __cnfn convert_ushort16(char16); +ushort16 __ovld __cnfn convert_ushort16_sat(char16); +ushort16 __ovld __cnfn convert_ushort16_rte(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(uchar16); +ushort16 __ovld __cnfn convert_ushort16_rtz(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(uchar16); +ushort16 __ovld __cnfn convert_ushort16_rtp(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uchar16); +ushort16 __ovld __cnfn convert_ushort16_rtn(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uchar16); +ushort16 __ovld __cnfn convert_ushort16(uchar16); +ushort16 __ovld __cnfn convert_ushort16_sat(uchar16); +ushort16 __ovld __cnfn convert_ushort16_rte(short16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(short16); +ushort16 __ovld __cnfn convert_ushort16_rtz(short16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(short16); +ushort16 __ovld __cnfn convert_ushort16_rtp(short16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(short16); +ushort16 __ovld __cnfn convert_ushort16_rtn(short16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(short16); +ushort16 __ovld __cnfn convert_ushort16(short16); +ushort16 __ovld __cnfn convert_ushort16_sat(short16); +ushort16 __ovld __cnfn convert_ushort16_rte(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(ushort16); +ushort16 __ovld __cnfn convert_ushort16_rtz(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ushort16); +ushort16 __ovld __cnfn convert_ushort16_rtp(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ushort16); +ushort16 __ovld __cnfn convert_ushort16_rtn(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ushort16); +ushort16 __ovld __cnfn convert_ushort16(ushort16); +ushort16 __ovld __cnfn convert_ushort16_sat(ushort16); +ushort16 __ovld __cnfn convert_ushort16_rte(int16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(int16); +ushort16 __ovld __cnfn convert_ushort16_rtz(int16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(int16); +ushort16 __ovld __cnfn convert_ushort16_rtp(int16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(int16); +ushort16 __ovld __cnfn convert_ushort16_rtn(int16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(int16); +ushort16 __ovld __cnfn convert_ushort16(int16); +ushort16 __ovld __cnfn convert_ushort16_sat(int16); +ushort16 __ovld __cnfn convert_ushort16_rte(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(uint16); +ushort16 __ovld __cnfn convert_ushort16_rtz(uint16); +ushort16 __ovld __cnfn 
convert_ushort16_sat_rtz(uint16); +ushort16 __ovld __cnfn convert_ushort16_rtp(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(uint16); +ushort16 __ovld __cnfn convert_ushort16_rtn(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(uint16); +ushort16 __ovld __cnfn convert_ushort16(uint16); +ushort16 __ovld __cnfn convert_ushort16_sat(uint16); +ushort16 __ovld __cnfn convert_ushort16_rte(long16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(long16); +ushort16 __ovld __cnfn convert_ushort16_rtz(long16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(long16); +ushort16 __ovld __cnfn convert_ushort16_rtp(long16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(long16); +ushort16 __ovld __cnfn convert_ushort16_rtn(long16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(long16); +ushort16 __ovld __cnfn convert_ushort16(long16); +ushort16 __ovld __cnfn convert_ushort16_sat(long16); +ushort16 __ovld __cnfn convert_ushort16_rte(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(ulong16); +ushort16 __ovld __cnfn convert_ushort16_rtz(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(ulong16); +ushort16 __ovld __cnfn convert_ushort16_rtp(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(ulong16); +ushort16 __ovld __cnfn convert_ushort16_rtn(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(ulong16); +ushort16 __ovld __cnfn convert_ushort16(ulong16); +ushort16 __ovld __cnfn convert_ushort16_sat(ulong16); +ushort16 __ovld __cnfn convert_ushort16_rte(float16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(float16); +ushort16 __ovld __cnfn convert_ushort16_rtz(float16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(float16); +ushort16 __ovld __cnfn convert_ushort16_rtp(float16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(float16); +ushort16 __ovld __cnfn convert_ushort16_rtn(float16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(float16); +ushort16 __ovld __cnfn convert_ushort16(float16); +ushort16 __ovld __cnfn convert_ushort16_sat(float16); +int16 __ovld __cnfn convert_int16_rte(char16); +int16 __ovld __cnfn convert_int16_sat_rte(char16); +int16 __ovld __cnfn convert_int16_rtz(char16); +int16 __ovld __cnfn convert_int16_sat_rtz(char16); +int16 __ovld __cnfn convert_int16_rtp(char16); +int16 __ovld __cnfn convert_int16_sat_rtp(char16); +int16 __ovld __cnfn convert_int16_rtn(char16); +int16 __ovld __cnfn convert_int16_sat_rtn(char16); +int16 __ovld __cnfn convert_int16(char16); +int16 __ovld __cnfn convert_int16_sat(char16); +int16 __ovld __cnfn convert_int16_rte(uchar16); +int16 __ovld __cnfn convert_int16_sat_rte(uchar16); +int16 __ovld __cnfn convert_int16_rtz(uchar16); +int16 __ovld __cnfn convert_int16_sat_rtz(uchar16); +int16 __ovld __cnfn convert_int16_rtp(uchar16); +int16 __ovld __cnfn convert_int16_sat_rtp(uchar16); +int16 __ovld __cnfn convert_int16_rtn(uchar16); +int16 __ovld __cnfn convert_int16_sat_rtn(uchar16); +int16 __ovld __cnfn convert_int16(uchar16); +int16 __ovld __cnfn convert_int16_sat(uchar16); +int16 __ovld __cnfn convert_int16_rte(short16); +int16 __ovld __cnfn convert_int16_sat_rte(short16); +int16 __ovld __cnfn convert_int16_rtz(short16); +int16 __ovld __cnfn convert_int16_sat_rtz(short16); +int16 __ovld __cnfn convert_int16_rtp(short16); +int16 __ovld __cnfn convert_int16_sat_rtp(short16); +int16 __ovld __cnfn convert_int16_rtn(short16); +int16 __ovld __cnfn convert_int16_sat_rtn(short16); +int16 __ovld __cnfn convert_int16(short16); +int16 __ovld __cnfn convert_int16_sat(short16); +int16 __ovld __cnfn 
convert_int16_rte(ushort16); +int16 __ovld __cnfn convert_int16_sat_rte(ushort16); +int16 __ovld __cnfn convert_int16_rtz(ushort16); +int16 __ovld __cnfn convert_int16_sat_rtz(ushort16); +int16 __ovld __cnfn convert_int16_rtp(ushort16); +int16 __ovld __cnfn convert_int16_sat_rtp(ushort16); +int16 __ovld __cnfn convert_int16_rtn(ushort16); +int16 __ovld __cnfn convert_int16_sat_rtn(ushort16); +int16 __ovld __cnfn convert_int16(ushort16); +int16 __ovld __cnfn convert_int16_sat(ushort16); +int16 __ovld __cnfn convert_int16_rte(int16); +int16 __ovld __cnfn convert_int16_sat_rte(int16); +int16 __ovld __cnfn convert_int16_rtz(int16); +int16 __ovld __cnfn convert_int16_sat_rtz(int16); +int16 __ovld __cnfn convert_int16_rtp(int16); +int16 __ovld __cnfn convert_int16_sat_rtp(int16); +int16 __ovld __cnfn convert_int16_rtn(int16); +int16 __ovld __cnfn convert_int16_sat_rtn(int16); +int16 __ovld __cnfn convert_int16(int16); +int16 __ovld __cnfn convert_int16_sat(int16); +int16 __ovld __cnfn convert_int16_rte(uint16); +int16 __ovld __cnfn convert_int16_sat_rte(uint16); +int16 __ovld __cnfn convert_int16_rtz(uint16); +int16 __ovld __cnfn convert_int16_sat_rtz(uint16); +int16 __ovld __cnfn convert_int16_rtp(uint16); +int16 __ovld __cnfn convert_int16_sat_rtp(uint16); +int16 __ovld __cnfn convert_int16_rtn(uint16); +int16 __ovld __cnfn convert_int16_sat_rtn(uint16); +int16 __ovld __cnfn convert_int16(uint16); +int16 __ovld __cnfn convert_int16_sat(uint16); +int16 __ovld __cnfn convert_int16_rte(long16); +int16 __ovld __cnfn convert_int16_sat_rte(long16); +int16 __ovld __cnfn convert_int16_rtz(long16); +int16 __ovld __cnfn convert_int16_sat_rtz(long16); +int16 __ovld __cnfn convert_int16_rtp(long16); +int16 __ovld __cnfn convert_int16_sat_rtp(long16); +int16 __ovld __cnfn convert_int16_rtn(long16); +int16 __ovld __cnfn convert_int16_sat_rtn(long16); +int16 __ovld __cnfn convert_int16(long16); +int16 __ovld __cnfn convert_int16_sat(long16); +int16 __ovld __cnfn convert_int16_rte(ulong16); +int16 __ovld __cnfn convert_int16_sat_rte(ulong16); +int16 __ovld __cnfn convert_int16_rtz(ulong16); +int16 __ovld __cnfn convert_int16_sat_rtz(ulong16); +int16 __ovld __cnfn convert_int16_rtp(ulong16); +int16 __ovld __cnfn convert_int16_sat_rtp(ulong16); +int16 __ovld __cnfn convert_int16_rtn(ulong16); +int16 __ovld __cnfn convert_int16_sat_rtn(ulong16); +int16 __ovld __cnfn convert_int16(ulong16); +int16 __ovld __cnfn convert_int16_sat(ulong16); +int16 __ovld __cnfn convert_int16_rte(float16); +int16 __ovld __cnfn convert_int16_sat_rte(float16); +int16 __ovld __cnfn convert_int16_rtz(float16); +int16 __ovld __cnfn convert_int16_sat_rtz(float16); +int16 __ovld __cnfn convert_int16_rtp(float16); +int16 __ovld __cnfn convert_int16_sat_rtp(float16); +int16 __ovld __cnfn convert_int16_rtn(float16); +int16 __ovld __cnfn convert_int16_sat_rtn(float16); +int16 __ovld __cnfn convert_int16(float16); +int16 __ovld __cnfn convert_int16_sat(float16); +uint16 __ovld __cnfn convert_uint16_rte(char16); +uint16 __ovld __cnfn convert_uint16_sat_rte(char16); +uint16 __ovld __cnfn convert_uint16_rtz(char16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(char16); +uint16 __ovld __cnfn convert_uint16_rtp(char16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(char16); +uint16 __ovld __cnfn convert_uint16_rtn(char16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(char16); +uint16 __ovld __cnfn convert_uint16(char16); +uint16 __ovld __cnfn convert_uint16_sat(char16); +uint16 __ovld __cnfn convert_uint16_rte(uchar16); +uint16 __ovld __cnfn 
convert_uint16_sat_rte(uchar16); +uint16 __ovld __cnfn convert_uint16_rtz(uchar16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(uchar16); +uint16 __ovld __cnfn convert_uint16_rtp(uchar16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(uchar16); +uint16 __ovld __cnfn convert_uint16_rtn(uchar16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(uchar16); +uint16 __ovld __cnfn convert_uint16(uchar16); +uint16 __ovld __cnfn convert_uint16_sat(uchar16); +uint16 __ovld __cnfn convert_uint16_rte(short16); +uint16 __ovld __cnfn convert_uint16_sat_rte(short16); +uint16 __ovld __cnfn convert_uint16_rtz(short16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(short16); +uint16 __ovld __cnfn convert_uint16_rtp(short16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(short16); +uint16 __ovld __cnfn convert_uint16_rtn(short16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(short16); +uint16 __ovld __cnfn convert_uint16(short16); +uint16 __ovld __cnfn convert_uint16_sat(short16); +uint16 __ovld __cnfn convert_uint16_rte(ushort16); +uint16 __ovld __cnfn convert_uint16_sat_rte(ushort16); +uint16 __ovld __cnfn convert_uint16_rtz(ushort16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(ushort16); +uint16 __ovld __cnfn convert_uint16_rtp(ushort16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(ushort16); +uint16 __ovld __cnfn convert_uint16_rtn(ushort16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(ushort16); +uint16 __ovld __cnfn convert_uint16(ushort16); +uint16 __ovld __cnfn convert_uint16_sat(ushort16); +uint16 __ovld __cnfn convert_uint16_rte(int16); +uint16 __ovld __cnfn convert_uint16_sat_rte(int16); +uint16 __ovld __cnfn convert_uint16_rtz(int16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(int16); +uint16 __ovld __cnfn convert_uint16_rtp(int16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(int16); +uint16 __ovld __cnfn convert_uint16_rtn(int16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(int16); +uint16 __ovld __cnfn convert_uint16(int16); +uint16 __ovld __cnfn convert_uint16_sat(int16); +uint16 __ovld __cnfn convert_uint16_rte(uint16); +uint16 __ovld __cnfn convert_uint16_sat_rte(uint16); +uint16 __ovld __cnfn convert_uint16_rtz(uint16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(uint16); +uint16 __ovld __cnfn convert_uint16_rtp(uint16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(uint16); +uint16 __ovld __cnfn convert_uint16_rtn(uint16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(uint16); +uint16 __ovld __cnfn convert_uint16(uint16); +uint16 __ovld __cnfn convert_uint16_sat(uint16); +uint16 __ovld __cnfn convert_uint16_rte(long16); +uint16 __ovld __cnfn convert_uint16_sat_rte(long16); +uint16 __ovld __cnfn convert_uint16_rtz(long16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(long16); +uint16 __ovld __cnfn convert_uint16_rtp(long16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(long16); +uint16 __ovld __cnfn convert_uint16_rtn(long16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(long16); +uint16 __ovld __cnfn convert_uint16(long16); +uint16 __ovld __cnfn convert_uint16_sat(long16); +uint16 __ovld __cnfn convert_uint16_rte(ulong16); +uint16 __ovld __cnfn convert_uint16_sat_rte(ulong16); +uint16 __ovld __cnfn convert_uint16_rtz(ulong16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(ulong16); +uint16 __ovld __cnfn convert_uint16_rtp(ulong16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(ulong16); +uint16 __ovld __cnfn convert_uint16_rtn(ulong16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(ulong16); +uint16 __ovld __cnfn convert_uint16(ulong16); +uint16 __ovld __cnfn convert_uint16_sat(ulong16); 
+uint16 __ovld __cnfn convert_uint16_rte(float16); +uint16 __ovld __cnfn convert_uint16_sat_rte(float16); +uint16 __ovld __cnfn convert_uint16_rtz(float16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(float16); +uint16 __ovld __cnfn convert_uint16_rtp(float16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(float16); +uint16 __ovld __cnfn convert_uint16_rtn(float16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(float16); +uint16 __ovld __cnfn convert_uint16(float16); +uint16 __ovld __cnfn convert_uint16_sat(float16); +long16 __ovld __cnfn convert_long16_rte(char16); +long16 __ovld __cnfn convert_long16_sat_rte(char16); +long16 __ovld __cnfn convert_long16_rtz(char16); +long16 __ovld __cnfn convert_long16_sat_rtz(char16); +long16 __ovld __cnfn convert_long16_rtp(char16); +long16 __ovld __cnfn convert_long16_sat_rtp(char16); +long16 __ovld __cnfn convert_long16_rtn(char16); +long16 __ovld __cnfn convert_long16_sat_rtn(char16); +long16 __ovld __cnfn convert_long16(char16); +long16 __ovld __cnfn convert_long16_sat(char16); +long16 __ovld __cnfn convert_long16_rte(uchar16); +long16 __ovld __cnfn convert_long16_sat_rte(uchar16); +long16 __ovld __cnfn convert_long16_rtz(uchar16); +long16 __ovld __cnfn convert_long16_sat_rtz(uchar16); +long16 __ovld __cnfn convert_long16_rtp(uchar16); +long16 __ovld __cnfn convert_long16_sat_rtp(uchar16); +long16 __ovld __cnfn convert_long16_rtn(uchar16); +long16 __ovld __cnfn convert_long16_sat_rtn(uchar16); +long16 __ovld __cnfn convert_long16(uchar16); +long16 __ovld __cnfn convert_long16_sat(uchar16); +long16 __ovld __cnfn convert_long16_rte(short16); +long16 __ovld __cnfn convert_long16_sat_rte(short16); +long16 __ovld __cnfn convert_long16_rtz(short16); +long16 __ovld __cnfn convert_long16_sat_rtz(short16); +long16 __ovld __cnfn convert_long16_rtp(short16); +long16 __ovld __cnfn convert_long16_sat_rtp(short16); +long16 __ovld __cnfn convert_long16_rtn(short16); +long16 __ovld __cnfn convert_long16_sat_rtn(short16); +long16 __ovld __cnfn convert_long16(short16); +long16 __ovld __cnfn convert_long16_sat(short16); +long16 __ovld __cnfn convert_long16_rte(ushort16); +long16 __ovld __cnfn convert_long16_sat_rte(ushort16); +long16 __ovld __cnfn convert_long16_rtz(ushort16); +long16 __ovld __cnfn convert_long16_sat_rtz(ushort16); +long16 __ovld __cnfn convert_long16_rtp(ushort16); +long16 __ovld __cnfn convert_long16_sat_rtp(ushort16); +long16 __ovld __cnfn convert_long16_rtn(ushort16); +long16 __ovld __cnfn convert_long16_sat_rtn(ushort16); +long16 __ovld __cnfn convert_long16(ushort16); +long16 __ovld __cnfn convert_long16_sat(ushort16); +long16 __ovld __cnfn convert_long16_rte(int16); +long16 __ovld __cnfn convert_long16_sat_rte(int16); +long16 __ovld __cnfn convert_long16_rtz(int16); +long16 __ovld __cnfn convert_long16_sat_rtz(int16); +long16 __ovld __cnfn convert_long16_rtp(int16); +long16 __ovld __cnfn convert_long16_sat_rtp(int16); +long16 __ovld __cnfn convert_long16_rtn(int16); +long16 __ovld __cnfn convert_long16_sat_rtn(int16); +long16 __ovld __cnfn convert_long16(int16); +long16 __ovld __cnfn convert_long16_sat(int16); +long16 __ovld __cnfn convert_long16_rte(uint16); +long16 __ovld __cnfn convert_long16_sat_rte(uint16); +long16 __ovld __cnfn convert_long16_rtz(uint16); +long16 __ovld __cnfn convert_long16_sat_rtz(uint16); +long16 __ovld __cnfn convert_long16_rtp(uint16); +long16 __ovld __cnfn convert_long16_sat_rtp(uint16); +long16 __ovld __cnfn convert_long16_rtn(uint16); +long16 __ovld __cnfn convert_long16_sat_rtn(uint16); +long16 __ovld __cnfn 
convert_long16(uint16); +long16 __ovld __cnfn convert_long16_sat(uint16); +long16 __ovld __cnfn convert_long16_rte(long16); +long16 __ovld __cnfn convert_long16_sat_rte(long16); +long16 __ovld __cnfn convert_long16_rtz(long16); +long16 __ovld __cnfn convert_long16_sat_rtz(long16); +long16 __ovld __cnfn convert_long16_rtp(long16); +long16 __ovld __cnfn convert_long16_sat_rtp(long16); +long16 __ovld __cnfn convert_long16_rtn(long16); +long16 __ovld __cnfn convert_long16_sat_rtn(long16); +long16 __ovld __cnfn convert_long16(long16); +long16 __ovld __cnfn convert_long16_sat(long16); +long16 __ovld __cnfn convert_long16_rte(ulong16); +long16 __ovld __cnfn convert_long16_sat_rte(ulong16); +long16 __ovld __cnfn convert_long16_rtz(ulong16); +long16 __ovld __cnfn convert_long16_sat_rtz(ulong16); +long16 __ovld __cnfn convert_long16_rtp(ulong16); +long16 __ovld __cnfn convert_long16_sat_rtp(ulong16); +long16 __ovld __cnfn convert_long16_rtn(ulong16); +long16 __ovld __cnfn convert_long16_sat_rtn(ulong16); +long16 __ovld __cnfn convert_long16(ulong16); +long16 __ovld __cnfn convert_long16_sat(ulong16); +long16 __ovld __cnfn convert_long16_rte(float16); +long16 __ovld __cnfn convert_long16_sat_rte(float16); +long16 __ovld __cnfn convert_long16_rtz(float16); +long16 __ovld __cnfn convert_long16_sat_rtz(float16); +long16 __ovld __cnfn convert_long16_rtp(float16); +long16 __ovld __cnfn convert_long16_sat_rtp(float16); +long16 __ovld __cnfn convert_long16_rtn(float16); +long16 __ovld __cnfn convert_long16_sat_rtn(float16); +long16 __ovld __cnfn convert_long16(float16); +long16 __ovld __cnfn convert_long16_sat(float16); +ulong16 __ovld __cnfn convert_ulong16_rte(char16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16); +ulong16 __ovld __cnfn convert_ulong16_rtz(char16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16); +ulong16 __ovld __cnfn convert_ulong16_rtp(char16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16); +ulong16 __ovld __cnfn convert_ulong16_rtn(char16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16); +ulong16 __ovld __cnfn convert_ulong16(char16); +ulong16 __ovld __cnfn convert_ulong16_sat(char16); +ulong16 __ovld __cnfn convert_ulong16_rte(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(uchar16); +ulong16 __ovld __cnfn convert_ulong16_rtz(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uchar16); +ulong16 __ovld __cnfn convert_ulong16_rtp(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uchar16); +ulong16 __ovld __cnfn convert_ulong16_rtn(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uchar16); +ulong16 __ovld __cnfn convert_ulong16(uchar16); +ulong16 __ovld __cnfn convert_ulong16_sat(uchar16); +ulong16 __ovld __cnfn convert_ulong16_rte(short16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(short16); +ulong16 __ovld __cnfn convert_ulong16_rtz(short16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(short16); +ulong16 __ovld __cnfn convert_ulong16_rtp(short16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(short16); +ulong16 __ovld __cnfn convert_ulong16_rtn(short16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(short16); +ulong16 __ovld __cnfn convert_ulong16(short16); +ulong16 __ovld __cnfn convert_ulong16_sat(short16); +ulong16 __ovld __cnfn convert_ulong16_rte(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(ushort16); +ulong16 __ovld __cnfn convert_ulong16_rtz(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ushort16); +ulong16 __ovld __cnfn convert_ulong16_rtp(ushort16); +ulong16 __ovld __cnfn 
convert_ulong16_sat_rtp(ushort16); +ulong16 __ovld __cnfn convert_ulong16_rtn(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ushort16); +ulong16 __ovld __cnfn convert_ulong16(ushort16); +ulong16 __ovld __cnfn convert_ulong16_sat(ushort16); +ulong16 __ovld __cnfn convert_ulong16_rte(int16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(int16); +ulong16 __ovld __cnfn convert_ulong16_rtz(int16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(int16); +ulong16 __ovld __cnfn convert_ulong16_rtp(int16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(int16); +ulong16 __ovld __cnfn convert_ulong16_rtn(int16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(int16); +ulong16 __ovld __cnfn convert_ulong16(int16); +ulong16 __ovld __cnfn convert_ulong16_sat(int16); +ulong16 __ovld __cnfn convert_ulong16_rte(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(uint16); +ulong16 __ovld __cnfn convert_ulong16_rtz(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(uint16); +ulong16 __ovld __cnfn convert_ulong16_rtp(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(uint16); +ulong16 __ovld __cnfn convert_ulong16_rtn(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(uint16); +ulong16 __ovld __cnfn convert_ulong16(uint16); +ulong16 __ovld __cnfn convert_ulong16_sat(uint16); +ulong16 __ovld __cnfn convert_ulong16_rte(long16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(long16); +ulong16 __ovld __cnfn convert_ulong16_rtz(long16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(long16); +ulong16 __ovld __cnfn convert_ulong16_rtp(long16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(long16); +ulong16 __ovld __cnfn convert_ulong16_rtn(long16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(long16); +ulong16 __ovld __cnfn convert_ulong16(long16); +ulong16 __ovld __cnfn convert_ulong16_sat(long16); +ulong16 __ovld __cnfn convert_ulong16_rte(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(ulong16); +ulong16 __ovld __cnfn convert_ulong16_rtz(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(ulong16); +ulong16 __ovld __cnfn convert_ulong16_rtp(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(ulong16); +ulong16 __ovld __cnfn convert_ulong16_rtn(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(ulong16); +ulong16 __ovld __cnfn convert_ulong16(ulong16); +ulong16 __ovld __cnfn convert_ulong16_sat(ulong16); +ulong16 __ovld __cnfn convert_ulong16_rte(float16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(float16); +ulong16 __ovld __cnfn convert_ulong16_rtz(float16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(float16); +ulong16 __ovld __cnfn convert_ulong16_rtp(float16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(float16); +ulong16 __ovld __cnfn convert_ulong16_rtn(float16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(float16); +ulong16 __ovld __cnfn convert_ulong16(float16); +ulong16 __ovld __cnfn convert_ulong16_sat(float16); +float16 __ovld __cnfn convert_float16_rte(char16); +float16 __ovld __cnfn convert_float16_rtz(char16); +float16 __ovld __cnfn convert_float16_rtp(char16); +float16 __ovld __cnfn convert_float16_rtn(char16); +float16 __ovld __cnfn convert_float16(char16); +float16 __ovld __cnfn convert_float16_rte(uchar16); +float16 __ovld __cnfn convert_float16_rtz(uchar16); +float16 __ovld __cnfn convert_float16_rtp(uchar16); +float16 __ovld __cnfn convert_float16_rtn(uchar16); +float16 __ovld __cnfn convert_float16(uchar16); +float16 __ovld __cnfn convert_float16_rte(short16); +float16 __ovld __cnfn convert_float16_rtz(short16); +float16 
__ovld __cnfn convert_float16_rtp(short16); +float16 __ovld __cnfn convert_float16_rtn(short16); +float16 __ovld __cnfn convert_float16(short16); +float16 __ovld __cnfn convert_float16_rte(ushort16); +float16 __ovld __cnfn convert_float16_rtz(ushort16); +float16 __ovld __cnfn convert_float16_rtp(ushort16); +float16 __ovld __cnfn convert_float16_rtn(ushort16); +float16 __ovld __cnfn convert_float16(ushort16); +float16 __ovld __cnfn convert_float16_rte(int16); +float16 __ovld __cnfn convert_float16_rtz(int16); +float16 __ovld __cnfn convert_float16_rtp(int16); +float16 __ovld __cnfn convert_float16_rtn(int16); +float16 __ovld __cnfn convert_float16(int16); +float16 __ovld __cnfn convert_float16_rte(uint16); +float16 __ovld __cnfn convert_float16_rtz(uint16); +float16 __ovld __cnfn convert_float16_rtp(uint16); +float16 __ovld __cnfn convert_float16_rtn(uint16); +float16 __ovld __cnfn convert_float16(uint16); +float16 __ovld __cnfn convert_float16_rte(long16); +float16 __ovld __cnfn convert_float16_rtz(long16); +float16 __ovld __cnfn convert_float16_rtp(long16); +float16 __ovld __cnfn convert_float16_rtn(long16); +float16 __ovld __cnfn convert_float16(long16); +float16 __ovld __cnfn convert_float16_rte(ulong16); +float16 __ovld __cnfn convert_float16_rtz(ulong16); +float16 __ovld __cnfn convert_float16_rtp(ulong16); +float16 __ovld __cnfn convert_float16_rtn(ulong16); +float16 __ovld __cnfn convert_float16(ulong16); +float16 __ovld __cnfn convert_float16_rte(float16); +float16 __ovld __cnfn convert_float16_rtz(float16); +float16 __ovld __cnfn convert_float16_rtp(float16); +float16 __ovld __cnfn convert_float16_rtn(float16); +float16 __ovld __cnfn convert_float16(float16); + +// Conversions with double data type parameters or return value. + +#ifdef cl_khr_fp64 +char __ovld __cnfn convert_char(double); +char __ovld __cnfn convert_char_rte(double); +char __ovld __cnfn convert_char_rtn(double); +char __ovld __cnfn convert_char_rtp(double); +char __ovld __cnfn convert_char_rtz(double); +char __ovld __cnfn convert_char_sat(double); +char __ovld __cnfn convert_char_sat_rte(double); +char __ovld __cnfn convert_char_sat_rtn(double); +char __ovld __cnfn convert_char_sat_rtp(double); +char __ovld __cnfn convert_char_sat_rtz(double); +char2 __ovld __cnfn convert_char2(double2); +char2 __ovld __cnfn convert_char2_rte(double2); +char2 __ovld __cnfn convert_char2_rtn(double2); +char2 __ovld __cnfn convert_char2_rtp(double2); +char2 __ovld __cnfn convert_char2_rtz(double2); +char2 __ovld __cnfn convert_char2_sat(double2); +char2 __ovld __cnfn convert_char2_sat_rte(double2); +char2 __ovld __cnfn convert_char2_sat_rtn(double2); +char2 __ovld __cnfn convert_char2_sat_rtp(double2); +char2 __ovld __cnfn convert_char2_sat_rtz(double2); +char3 __ovld __cnfn convert_char3(double3); +char3 __ovld __cnfn convert_char3_rte(double3); +char3 __ovld __cnfn convert_char3_rtn(double3); +char3 __ovld __cnfn convert_char3_rtp(double3); +char3 __ovld __cnfn convert_char3_rtz(double3); +char3 __ovld __cnfn convert_char3_sat(double3); +char3 __ovld __cnfn convert_char3_sat_rte(double3); +char3 __ovld __cnfn convert_char3_sat_rtn(double3); +char3 __ovld __cnfn convert_char3_sat_rtp(double3); +char3 __ovld __cnfn convert_char3_sat_rtz(double3); +char4 __ovld __cnfn convert_char4(double4); +char4 __ovld __cnfn convert_char4_rte(double4); +char4 __ovld __cnfn convert_char4_rtn(double4); +char4 __ovld __cnfn convert_char4_rtp(double4); +char4 __ovld __cnfn convert_char4_rtz(double4); +char4 __ovld __cnfn convert_char4_sat(double4); 
+char4 __ovld __cnfn convert_char4_sat_rte(double4); +char4 __ovld __cnfn convert_char4_sat_rtn(double4); +char4 __ovld __cnfn convert_char4_sat_rtp(double4); +char4 __ovld __cnfn convert_char4_sat_rtz(double4); +char8 __ovld __cnfn convert_char8(double8); +char8 __ovld __cnfn convert_char8_rte(double8); +char8 __ovld __cnfn convert_char8_rtn(double8); +char8 __ovld __cnfn convert_char8_rtp(double8); +char8 __ovld __cnfn convert_char8_rtz(double8); +char8 __ovld __cnfn convert_char8_sat(double8); +char8 __ovld __cnfn convert_char8_sat_rte(double8); +char8 __ovld __cnfn convert_char8_sat_rtn(double8); +char8 __ovld __cnfn convert_char8_sat_rtp(double8); +char8 __ovld __cnfn convert_char8_sat_rtz(double8); +char16 __ovld __cnfn convert_char16(double16); +char16 __ovld __cnfn convert_char16_rte(double16); +char16 __ovld __cnfn convert_char16_rtn(double16); +char16 __ovld __cnfn convert_char16_rtp(double16); +char16 __ovld __cnfn convert_char16_rtz(double16); +char16 __ovld __cnfn convert_char16_sat(double16); +char16 __ovld __cnfn convert_char16_sat_rte(double16); +char16 __ovld __cnfn convert_char16_sat_rtn(double16); +char16 __ovld __cnfn convert_char16_sat_rtp(double16); +char16 __ovld __cnfn convert_char16_sat_rtz(double16); + +uchar __ovld __cnfn convert_uchar(double); +uchar __ovld __cnfn convert_uchar_rte(double); +uchar __ovld __cnfn convert_uchar_rtn(double); +uchar __ovld __cnfn convert_uchar_rtp(double); +uchar __ovld __cnfn convert_uchar_rtz(double); +uchar __ovld __cnfn convert_uchar_sat(double); +uchar __ovld __cnfn convert_uchar_sat_rte(double); +uchar __ovld __cnfn convert_uchar_sat_rtn(double); +uchar __ovld __cnfn convert_uchar_sat_rtp(double); +uchar __ovld __cnfn convert_uchar_sat_rtz(double); +uchar2 __ovld __cnfn convert_uchar2(double2); +uchar2 __ovld __cnfn convert_uchar2_rte(double2); +uchar2 __ovld __cnfn convert_uchar2_rtn(double2); +uchar2 __ovld __cnfn convert_uchar2_rtp(double2); +uchar2 __ovld __cnfn convert_uchar2_rtz(double2); +uchar2 __ovld __cnfn convert_uchar2_sat(double2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(double2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(double2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(double2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(double2); +uchar3 __ovld __cnfn convert_uchar3(double3); +uchar3 __ovld __cnfn convert_uchar3_rte(double3); +uchar3 __ovld __cnfn convert_uchar3_rtn(double3); +uchar3 __ovld __cnfn convert_uchar3_rtp(double3); +uchar3 __ovld __cnfn convert_uchar3_rtz(double3); +uchar3 __ovld __cnfn convert_uchar3_sat(double3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(double3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(double3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(double3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(double3); +uchar4 __ovld __cnfn convert_uchar4(double4); +uchar4 __ovld __cnfn convert_uchar4_rte(double4); +uchar4 __ovld __cnfn convert_uchar4_rtn(double4); +uchar4 __ovld __cnfn convert_uchar4_rtp(double4); +uchar4 __ovld __cnfn convert_uchar4_rtz(double4); +uchar4 __ovld __cnfn convert_uchar4_sat(double4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(double4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(double4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(double4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(double4); +uchar8 __ovld __cnfn convert_uchar8(double8); +uchar8 __ovld __cnfn convert_uchar8_rte(double8); +uchar8 __ovld __cnfn convert_uchar8_rtn(double8); +uchar8 __ovld __cnfn convert_uchar8_rtp(double8); +uchar8 __ovld __cnfn 
convert_uchar8_rtz(double8); +uchar8 __ovld __cnfn convert_uchar8_sat(double8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(double8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(double8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(double8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(double8); +uchar16 __ovld __cnfn convert_uchar16(double16); +uchar16 __ovld __cnfn convert_uchar16_rte(double16); +uchar16 __ovld __cnfn convert_uchar16_rtn(double16); +uchar16 __ovld __cnfn convert_uchar16_rtp(double16); +uchar16 __ovld __cnfn convert_uchar16_rtz(double16); +uchar16 __ovld __cnfn convert_uchar16_sat(double16); +uchar16 __ovld __cnfn convert_uchar16_sat_rte(double16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(double16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(double16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(double16); + +short __ovld __cnfn convert_short(double); +short __ovld __cnfn convert_short_rte(double); +short __ovld __cnfn convert_short_rtn(double); +short __ovld __cnfn convert_short_rtp(double); +short __ovld __cnfn convert_short_rtz(double); +short __ovld __cnfn convert_short_sat(double); +short __ovld __cnfn convert_short_sat_rte(double); +short __ovld __cnfn convert_short_sat_rtn(double); +short __ovld __cnfn convert_short_sat_rtp(double); +short __ovld __cnfn convert_short_sat_rtz(double); +short2 __ovld __cnfn convert_short2(double2); +short2 __ovld __cnfn convert_short2_rte(double2); +short2 __ovld __cnfn convert_short2_rtn(double2); +short2 __ovld __cnfn convert_short2_rtp(double2); +short2 __ovld __cnfn convert_short2_rtz(double2); +short2 __ovld __cnfn convert_short2_sat(double2); +short2 __ovld __cnfn convert_short2_sat_rte(double2); +short2 __ovld __cnfn convert_short2_sat_rtn(double2); +short2 __ovld __cnfn convert_short2_sat_rtp(double2); +short2 __ovld __cnfn convert_short2_sat_rtz(double2); +short3 __ovld __cnfn convert_short3(double3); +short3 __ovld __cnfn convert_short3_rte(double3); +short3 __ovld __cnfn convert_short3_rtn(double3); +short3 __ovld __cnfn convert_short3_rtp(double3); +short3 __ovld __cnfn convert_short3_rtz(double3); +short3 __ovld __cnfn convert_short3_sat(double3); +short3 __ovld __cnfn convert_short3_sat_rte(double3); +short3 __ovld __cnfn convert_short3_sat_rtn(double3); +short3 __ovld __cnfn convert_short3_sat_rtp(double3); +short3 __ovld __cnfn convert_short3_sat_rtz(double3); +short4 __ovld __cnfn convert_short4(double4); +short4 __ovld __cnfn convert_short4_rte(double4); +short4 __ovld __cnfn convert_short4_rtn(double4); +short4 __ovld __cnfn convert_short4_rtp(double4); +short4 __ovld __cnfn convert_short4_rtz(double4); +short4 __ovld __cnfn convert_short4_sat(double4); +short4 __ovld __cnfn convert_short4_sat_rte(double4); +short4 __ovld __cnfn convert_short4_sat_rtn(double4); +short4 __ovld __cnfn convert_short4_sat_rtp(double4); +short4 __ovld __cnfn convert_short4_sat_rtz(double4); +short8 __ovld __cnfn convert_short8(double8); +short8 __ovld __cnfn convert_short8_rte(double8); +short8 __ovld __cnfn convert_short8_rtn(double8); +short8 __ovld __cnfn convert_short8_rtp(double8); +short8 __ovld __cnfn convert_short8_rtz(double8); +short8 __ovld __cnfn convert_short8_sat(double8); +short8 __ovld __cnfn convert_short8_sat_rte(double8); +short8 __ovld __cnfn convert_short8_sat_rtn(double8); +short8 __ovld __cnfn convert_short8_sat_rtp(double8); +short8 __ovld __cnfn convert_short8_sat_rtz(double8); +short16 __ovld __cnfn convert_short16(double16); +short16 __ovld __cnfn convert_short16_rte(double16); +short16 __ovld 
__cnfn convert_short16_rtn(double16); +short16 __ovld __cnfn convert_short16_rtp(double16); +short16 __ovld __cnfn convert_short16_rtz(double16); +short16 __ovld __cnfn convert_short16_sat(double16); +short16 __ovld __cnfn convert_short16_sat_rte(double16); +short16 __ovld __cnfn convert_short16_sat_rtn(double16); +short16 __ovld __cnfn convert_short16_sat_rtp(double16); +short16 __ovld __cnfn convert_short16_sat_rtz(double16); + +ushort __ovld __cnfn convert_ushort(double); +ushort __ovld __cnfn convert_ushort_rte(double); +ushort __ovld __cnfn convert_ushort_rtn(double); +ushort __ovld __cnfn convert_ushort_rtp(double); +ushort __ovld __cnfn convert_ushort_rtz(double); +ushort __ovld __cnfn convert_ushort_sat(double); +ushort __ovld __cnfn convert_ushort_sat_rte(double); +ushort __ovld __cnfn convert_ushort_sat_rtn(double); +ushort __ovld __cnfn convert_ushort_sat_rtp(double); +ushort __ovld __cnfn convert_ushort_sat_rtz(double); +ushort2 __ovld __cnfn convert_ushort2(double2); +ushort2 __ovld __cnfn convert_ushort2_rte(double2); +ushort2 __ovld __cnfn convert_ushort2_rtn(double2); +ushort2 __ovld __cnfn convert_ushort2_rtp(double2); +ushort2 __ovld __cnfn convert_ushort2_rtz(double2); +ushort2 __ovld __cnfn convert_ushort2_sat(double2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(double2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(double2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(double2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(double2); +ushort3 __ovld __cnfn convert_ushort3(double3); +ushort3 __ovld __cnfn convert_ushort3_rte(double3); +ushort3 __ovld __cnfn convert_ushort3_rtn(double3); +ushort3 __ovld __cnfn convert_ushort3_rtp(double3); +ushort3 __ovld __cnfn convert_ushort3_rtz(double3); +ushort3 __ovld __cnfn convert_ushort3_sat(double3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(double3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(double3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(double3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(double3); +ushort4 __ovld __cnfn convert_ushort4(double4); +ushort4 __ovld __cnfn convert_ushort4_rte(double4); +ushort4 __ovld __cnfn convert_ushort4_rtn(double4); +ushort4 __ovld __cnfn convert_ushort4_rtp(double4); +ushort4 __ovld __cnfn convert_ushort4_rtz(double4); +ushort4 __ovld __cnfn convert_ushort4_sat(double4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(double4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(double4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(double4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(double4); +ushort8 __ovld __cnfn convert_ushort8(double8); +ushort8 __ovld __cnfn convert_ushort8_rte(double8); +ushort8 __ovld __cnfn convert_ushort8_rtn(double8); +ushort8 __ovld __cnfn convert_ushort8_rtp(double8); +ushort8 __ovld __cnfn convert_ushort8_rtz(double8); +ushort8 __ovld __cnfn convert_ushort8_sat(double8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(double8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(double8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(double8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(double8); +ushort16 __ovld __cnfn convert_ushort16(double16); +ushort16 __ovld __cnfn convert_ushort16_rte(double16); +ushort16 __ovld __cnfn convert_ushort16_rtn(double16); +ushort16 __ovld __cnfn convert_ushort16_rtp(double16); +ushort16 __ovld __cnfn convert_ushort16_rtz(double16); +ushort16 __ovld __cnfn convert_ushort16_sat(double16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(double16); +ushort16 __ovld __cnfn 
convert_ushort16_sat_rtn(double16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(double16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(double16); + +int __ovld __cnfn convert_int(double); +int __ovld __cnfn convert_int_rte(double); +int __ovld __cnfn convert_int_rtn(double); +int __ovld __cnfn convert_int_rtp(double); +int __ovld __cnfn convert_int_rtz(double); +int __ovld __cnfn convert_int_sat(double); +int __ovld __cnfn convert_int_sat_rte(double); +int __ovld __cnfn convert_int_sat_rtn(double); +int __ovld __cnfn convert_int_sat_rtp(double); +int __ovld __cnfn convert_int_sat_rtz(double); +int2 __ovld __cnfn convert_int2(double2); +int2 __ovld __cnfn convert_int2_rte(double2); +int2 __ovld __cnfn convert_int2_rtn(double2); +int2 __ovld __cnfn convert_int2_rtp(double2); +int2 __ovld __cnfn convert_int2_rtz(double2); +int2 __ovld __cnfn convert_int2_sat(double2); +int2 __ovld __cnfn convert_int2_sat_rte(double2); +int2 __ovld __cnfn convert_int2_sat_rtn(double2); +int2 __ovld __cnfn convert_int2_sat_rtp(double2); +int2 __ovld __cnfn convert_int2_sat_rtz(double2); +int3 __ovld __cnfn convert_int3(double3); +int3 __ovld __cnfn convert_int3_rte(double3); +int3 __ovld __cnfn convert_int3_rtn(double3); +int3 __ovld __cnfn convert_int3_rtp(double3); +int3 __ovld __cnfn convert_int3_rtz(double3); +int3 __ovld __cnfn convert_int3_sat(double3); +int3 __ovld __cnfn convert_int3_sat_rte(double3); +int3 __ovld __cnfn convert_int3_sat_rtn(double3); +int3 __ovld __cnfn convert_int3_sat_rtp(double3); +int3 __ovld __cnfn convert_int3_sat_rtz(double3); +int4 __ovld __cnfn convert_int4(double4); +int4 __ovld __cnfn convert_int4_rte(double4); +int4 __ovld __cnfn convert_int4_rtn(double4); +int4 __ovld __cnfn convert_int4_rtp(double4); +int4 __ovld __cnfn convert_int4_rtz(double4); +int4 __ovld __cnfn convert_int4_sat(double4); +int4 __ovld __cnfn convert_int4_sat_rte(double4); +int4 __ovld __cnfn convert_int4_sat_rtn(double4); +int4 __ovld __cnfn convert_int4_sat_rtp(double4); +int4 __ovld __cnfn convert_int4_sat_rtz(double4); +int8 __ovld __cnfn convert_int8(double8); +int8 __ovld __cnfn convert_int8_rte(double8); +int8 __ovld __cnfn convert_int8_rtn(double8); +int8 __ovld __cnfn convert_int8_rtp(double8); +int8 __ovld __cnfn convert_int8_rtz(double8); +int8 __ovld __cnfn convert_int8_sat(double8); +int8 __ovld __cnfn convert_int8_sat_rte(double8); +int8 __ovld __cnfn convert_int8_sat_rtn(double8); +int8 __ovld __cnfn convert_int8_sat_rtp(double8); +int8 __ovld __cnfn convert_int8_sat_rtz(double8); +int16 __ovld __cnfn convert_int16(double16); +int16 __ovld __cnfn convert_int16_rte(double16); +int16 __ovld __cnfn convert_int16_rtn(double16); +int16 __ovld __cnfn convert_int16_rtp(double16); +int16 __ovld __cnfn convert_int16_rtz(double16); +int16 __ovld __cnfn convert_int16_sat(double16); +int16 __ovld __cnfn convert_int16_sat_rte(double16); +int16 __ovld __cnfn convert_int16_sat_rtn(double16); +int16 __ovld __cnfn convert_int16_sat_rtp(double16); +int16 __ovld __cnfn convert_int16_sat_rtz(double16); + +uint __ovld __cnfn convert_uint(double); +uint __ovld __cnfn convert_uint_rte(double); +uint __ovld __cnfn convert_uint_rtn(double); +uint __ovld __cnfn convert_uint_rtp(double); +uint __ovld __cnfn convert_uint_rtz(double); +uint __ovld __cnfn convert_uint_sat(double); +uint __ovld __cnfn convert_uint_sat_rte(double); +uint __ovld __cnfn convert_uint_sat_rtn(double); +uint __ovld __cnfn convert_uint_sat_rtp(double); +uint __ovld __cnfn convert_uint_sat_rtz(double); +uint2 __ovld __cnfn 
convert_uint2(double2); +uint2 __ovld __cnfn convert_uint2_rte(double2); +uint2 __ovld __cnfn convert_uint2_rtn(double2); +uint2 __ovld __cnfn convert_uint2_rtp(double2); +uint2 __ovld __cnfn convert_uint2_rtz(double2); +uint2 __ovld __cnfn convert_uint2_sat(double2); +uint2 __ovld __cnfn convert_uint2_sat_rte(double2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(double2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(double2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(double2); +uint3 __ovld __cnfn convert_uint3(double3); +uint3 __ovld __cnfn convert_uint3_rte(double3); +uint3 __ovld __cnfn convert_uint3_rtn(double3); +uint3 __ovld __cnfn convert_uint3_rtp(double3); +uint3 __ovld __cnfn convert_uint3_rtz(double3); +uint3 __ovld __cnfn convert_uint3_sat(double3); +uint3 __ovld __cnfn convert_uint3_sat_rte(double3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(double3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(double3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(double3); +uint4 __ovld __cnfn convert_uint4(double4); +uint4 __ovld __cnfn convert_uint4_rte(double4); +uint4 __ovld __cnfn convert_uint4_rtn(double4); +uint4 __ovld __cnfn convert_uint4_rtp(double4); +uint4 __ovld __cnfn convert_uint4_rtz(double4); +uint4 __ovld __cnfn convert_uint4_sat(double4); +uint4 __ovld __cnfn convert_uint4_sat_rte(double4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(double4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(double4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(double4); +uint8 __ovld __cnfn convert_uint8(double8); +uint8 __ovld __cnfn convert_uint8_rte(double8); +uint8 __ovld __cnfn convert_uint8_rtn(double8); +uint8 __ovld __cnfn convert_uint8_rtp(double8); +uint8 __ovld __cnfn convert_uint8_rtz(double8); +uint8 __ovld __cnfn convert_uint8_sat(double8); +uint8 __ovld __cnfn convert_uint8_sat_rte(double8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(double8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(double8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(double8); +uint16 __ovld __cnfn convert_uint16(double16); +uint16 __ovld __cnfn convert_uint16_rte(double16); +uint16 __ovld __cnfn convert_uint16_rtn(double16); +uint16 __ovld __cnfn convert_uint16_rtp(double16); +uint16 __ovld __cnfn convert_uint16_rtz(double16); +uint16 __ovld __cnfn convert_uint16_sat(double16); +uint16 __ovld __cnfn convert_uint16_sat_rte(double16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(double16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(double16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(double16); + +long __ovld __cnfn convert_long(double); +long __ovld __cnfn convert_long_rte(double); +long __ovld __cnfn convert_long_rtn(double); +long __ovld __cnfn convert_long_rtp(double); +long __ovld __cnfn convert_long_rtz(double); +long __ovld __cnfn convert_long_sat(double); +long __ovld __cnfn convert_long_sat_rte(double); +long __ovld __cnfn convert_long_sat_rtn(double); +long __ovld __cnfn convert_long_sat_rtp(double); +long __ovld __cnfn convert_long_sat_rtz(double); +long2 __ovld __cnfn convert_long2(double2); +long2 __ovld __cnfn convert_long2_rte(double2); +long2 __ovld __cnfn convert_long2_rtn(double2); +long2 __ovld __cnfn convert_long2_rtp(double2); +long2 __ovld __cnfn convert_long2_rtz(double2); +long2 __ovld __cnfn convert_long2_sat(double2); +long2 __ovld __cnfn convert_long2_sat_rte(double2); +long2 __ovld __cnfn convert_long2_sat_rtn(double2); +long2 __ovld __cnfn convert_long2_sat_rtp(double2); +long2 __ovld __cnfn convert_long2_sat_rtz(double2); +long3 __ovld __cnfn convert_long3(double3); +long3 __ovld 
__cnfn convert_long3_rte(double3); +long3 __ovld __cnfn convert_long3_rtn(double3); +long3 __ovld __cnfn convert_long3_rtp(double3); +long3 __ovld __cnfn convert_long3_rtz(double3); +long3 __ovld __cnfn convert_long3_sat(double3); +long3 __ovld __cnfn convert_long3_sat_rte(double3); +long3 __ovld __cnfn convert_long3_sat_rtn(double3); +long3 __ovld __cnfn convert_long3_sat_rtp(double3); +long3 __ovld __cnfn convert_long3_sat_rtz(double3); +long4 __ovld __cnfn convert_long4(double4); +long4 __ovld __cnfn convert_long4_rte(double4); +long4 __ovld __cnfn convert_long4_rtn(double4); +long4 __ovld __cnfn convert_long4_rtp(double4); +long4 __ovld __cnfn convert_long4_rtz(double4); +long4 __ovld __cnfn convert_long4_sat(double4); +long4 __ovld __cnfn convert_long4_sat_rte(double4); +long4 __ovld __cnfn convert_long4_sat_rtn(double4); +long4 __ovld __cnfn convert_long4_sat_rtp(double4); +long4 __ovld __cnfn convert_long4_sat_rtz(double4); +long8 __ovld __cnfn convert_long8(double8); +long8 __ovld __cnfn convert_long8_rte(double8); +long8 __ovld __cnfn convert_long8_rtn(double8); +long8 __ovld __cnfn convert_long8_rtp(double8); +long8 __ovld __cnfn convert_long8_rtz(double8); +long8 __ovld __cnfn convert_long8_sat(double8); +long8 __ovld __cnfn convert_long8_sat_rte(double8); +long8 __ovld __cnfn convert_long8_sat_rtn(double8); +long8 __ovld __cnfn convert_long8_sat_rtp(double8); +long8 __ovld __cnfn convert_long8_sat_rtz(double8); +long16 __ovld __cnfn convert_long16(double16); +long16 __ovld __cnfn convert_long16_rte(double16); +long16 __ovld __cnfn convert_long16_rtn(double16); +long16 __ovld __cnfn convert_long16_rtp(double16); +long16 __ovld __cnfn convert_long16_rtz(double16); +long16 __ovld __cnfn convert_long16_sat(double16); +long16 __ovld __cnfn convert_long16_sat_rte(double16); +long16 __ovld __cnfn convert_long16_sat_rtn(double16); +long16 __ovld __cnfn convert_long16_sat_rtp(double16); +long16 __ovld __cnfn convert_long16_sat_rtz(double16); + +ulong __ovld __cnfn convert_ulong(double); +ulong __ovld __cnfn convert_ulong_rte(double); +ulong __ovld __cnfn convert_ulong_rtn(double); +ulong __ovld __cnfn convert_ulong_rtp(double); +ulong __ovld __cnfn convert_ulong_rtz(double); +ulong __ovld __cnfn convert_ulong_sat(double); +ulong __ovld __cnfn convert_ulong_sat_rte(double); +ulong __ovld __cnfn convert_ulong_sat_rtn(double); +ulong __ovld __cnfn convert_ulong_sat_rtp(double); +ulong __ovld __cnfn convert_ulong_sat_rtz(double); +ulong2 __ovld __cnfn convert_ulong2(double2); +ulong2 __ovld __cnfn convert_ulong2_rte(double2); +ulong2 __ovld __cnfn convert_ulong2_rtn(double2); +ulong2 __ovld __cnfn convert_ulong2_rtp(double2); +ulong2 __ovld __cnfn convert_ulong2_rtz(double2); +ulong2 __ovld __cnfn convert_ulong2_sat(double2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(double2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtn(double2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(double2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(double2); +ulong3 __ovld __cnfn convert_ulong3(double3); +ulong3 __ovld __cnfn convert_ulong3_rte(double3); +ulong3 __ovld __cnfn convert_ulong3_rtn(double3); +ulong3 __ovld __cnfn convert_ulong3_rtp(double3); +ulong3 __ovld __cnfn convert_ulong3_rtz(double3); +ulong3 __ovld __cnfn convert_ulong3_sat(double3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(double3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(double3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(double3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(double3); +ulong4 __ovld __cnfn 
convert_ulong4(double4); +ulong4 __ovld __cnfn convert_ulong4_rte(double4); +ulong4 __ovld __cnfn convert_ulong4_rtn(double4); +ulong4 __ovld __cnfn convert_ulong4_rtp(double4); +ulong4 __ovld __cnfn convert_ulong4_rtz(double4); +ulong4 __ovld __cnfn convert_ulong4_sat(double4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(double4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(double4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(double4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(double4); +ulong8 __ovld __cnfn convert_ulong8(double8); +ulong8 __ovld __cnfn convert_ulong8_rte(double8); +ulong8 __ovld __cnfn convert_ulong8_rtn(double8); +ulong8 __ovld __cnfn convert_ulong8_rtp(double8); +ulong8 __ovld __cnfn convert_ulong8_rtz(double8); +ulong8 __ovld __cnfn convert_ulong8_sat(double8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(double8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(double8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(double8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(double8); +ulong16 __ovld __cnfn convert_ulong16(double16); +ulong16 __ovld __cnfn convert_ulong16_rte(double16); +ulong16 __ovld __cnfn convert_ulong16_rtn(double16); +ulong16 __ovld __cnfn convert_ulong16_rtp(double16); +ulong16 __ovld __cnfn convert_ulong16_rtz(double16); +ulong16 __ovld __cnfn convert_ulong16_sat(double16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(double16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(double16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(double16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(double16); + +float __ovld __cnfn convert_float(double); +float __ovld __cnfn convert_float_rte(double); +float __ovld __cnfn convert_float_rtn(double); +float __ovld __cnfn convert_float_rtp(double); +float __ovld __cnfn convert_float_rtz(double); +float2 __ovld __cnfn convert_float2(double2); +float2 __ovld __cnfn convert_float2_rte(double2); +float2 __ovld __cnfn convert_float2_rtn(double2); +float2 __ovld __cnfn convert_float2_rtp(double2); +float2 __ovld __cnfn convert_float2_rtz(double2); +float3 __ovld __cnfn convert_float3(double3); +float3 __ovld __cnfn convert_float3_rte(double3); +float3 __ovld __cnfn convert_float3_rtn(double3); +float3 __ovld __cnfn convert_float3_rtp(double3); +float3 __ovld __cnfn convert_float3_rtz(double3); +float4 __ovld __cnfn convert_float4(double4); +float4 __ovld __cnfn convert_float4_rte(double4); +float4 __ovld __cnfn convert_float4_rtn(double4); +float4 __ovld __cnfn convert_float4_rtp(double4); +float4 __ovld __cnfn convert_float4_rtz(double4); +float8 __ovld __cnfn convert_float8(double8); +float8 __ovld __cnfn convert_float8_rte(double8); +float8 __ovld __cnfn convert_float8_rtn(double8); +float8 __ovld __cnfn convert_float8_rtp(double8); +float8 __ovld __cnfn convert_float8_rtz(double8); +float16 __ovld __cnfn convert_float16(double16); +float16 __ovld __cnfn convert_float16_rte(double16); +float16 __ovld __cnfn convert_float16_rtn(double16); +float16 __ovld __cnfn convert_float16_rtp(double16); +float16 __ovld __cnfn convert_float16_rtz(double16); + +double __ovld __cnfn convert_double(char); +double __ovld __cnfn convert_double(double); +double __ovld __cnfn convert_double(float); +double __ovld __cnfn convert_double(int); +double __ovld __cnfn convert_double(long); +double __ovld __cnfn convert_double(short); +double __ovld __cnfn convert_double(uchar); +double __ovld __cnfn convert_double(uint); +double __ovld __cnfn convert_double(ulong); +double __ovld __cnfn convert_double(ushort); +double __ovld __cnfn 
convert_double_rte(char); +double __ovld __cnfn convert_double_rte(double); +double __ovld __cnfn convert_double_rte(float); +double __ovld __cnfn convert_double_rte(int); +double __ovld __cnfn convert_double_rte(long); +double __ovld __cnfn convert_double_rte(short); +double __ovld __cnfn convert_double_rte(uchar); +double __ovld __cnfn convert_double_rte(uint); +double __ovld __cnfn convert_double_rte(ulong); +double __ovld __cnfn convert_double_rte(ushort); +double __ovld __cnfn convert_double_rtn(char); +double __ovld __cnfn convert_double_rtn(double); +double __ovld __cnfn convert_double_rtn(float); +double __ovld __cnfn convert_double_rtn(int); +double __ovld __cnfn convert_double_rtn(long); +double __ovld __cnfn convert_double_rtn(short); +double __ovld __cnfn convert_double_rtn(uchar); +double __ovld __cnfn convert_double_rtn(uint); +double __ovld __cnfn convert_double_rtn(ulong); +double __ovld __cnfn convert_double_rtn(ushort); +double __ovld __cnfn convert_double_rtp(char); +double __ovld __cnfn convert_double_rtp(double); +double __ovld __cnfn convert_double_rtp(float); +double __ovld __cnfn convert_double_rtp(int); +double __ovld __cnfn convert_double_rtp(long); +double __ovld __cnfn convert_double_rtp(short); +double __ovld __cnfn convert_double_rtp(uchar); +double __ovld __cnfn convert_double_rtp(uint); +double __ovld __cnfn convert_double_rtp(ulong); +double __ovld __cnfn convert_double_rtp(ushort); +double __ovld __cnfn convert_double_rtz(char); +double __ovld __cnfn convert_double_rtz(double); +double __ovld __cnfn convert_double_rtz(float); +double __ovld __cnfn convert_double_rtz(int); +double __ovld __cnfn convert_double_rtz(long); +double __ovld __cnfn convert_double_rtz(short); +double __ovld __cnfn convert_double_rtz(uchar); +double __ovld __cnfn convert_double_rtz(uint); +double __ovld __cnfn convert_double_rtz(ulong); +double __ovld __cnfn convert_double_rtz(ushort); +double2 __ovld __cnfn convert_double2(char2); +double2 __ovld __cnfn convert_double2(double2); +double2 __ovld __cnfn convert_double2(float2); +double2 __ovld __cnfn convert_double2(int2); +double2 __ovld __cnfn convert_double2(long2); +double2 __ovld __cnfn convert_double2(short2); +double2 __ovld __cnfn convert_double2(uchar2); +double2 __ovld __cnfn convert_double2(uint2); +double2 __ovld __cnfn convert_double2(ulong2); +double2 __ovld __cnfn convert_double2(ushort2); +double2 __ovld __cnfn convert_double2_rte(char2); +double2 __ovld __cnfn convert_double2_rte(double2); +double2 __ovld __cnfn convert_double2_rte(float2); +double2 __ovld __cnfn convert_double2_rte(int2); +double2 __ovld __cnfn convert_double2_rte(long2); +double2 __ovld __cnfn convert_double2_rte(short2); +double2 __ovld __cnfn convert_double2_rte(uchar2); +double2 __ovld __cnfn convert_double2_rte(uint2); +double2 __ovld __cnfn convert_double2_rte(ulong2); +double2 __ovld __cnfn convert_double2_rte(ushort2); +double2 __ovld __cnfn convert_double2_rtn(char2); +double2 __ovld __cnfn convert_double2_rtn(double2); +double2 __ovld __cnfn convert_double2_rtn(float2); +double2 __ovld __cnfn convert_double2_rtn(int2); +double2 __ovld __cnfn convert_double2_rtn(long2); +double2 __ovld __cnfn convert_double2_rtn(short2); +double2 __ovld __cnfn convert_double2_rtn(uchar2); +double2 __ovld __cnfn convert_double2_rtn(uint2); +double2 __ovld __cnfn convert_double2_rtn(ulong2); +double2 __ovld __cnfn convert_double2_rtn(ushort2); +double2 __ovld __cnfn convert_double2_rtp(char2); +double2 __ovld __cnfn convert_double2_rtp(double2); +double2 
__ovld __cnfn convert_double2_rtp(float2); +double2 __ovld __cnfn convert_double2_rtp(int2); +double2 __ovld __cnfn convert_double2_rtp(long2); +double2 __ovld __cnfn convert_double2_rtp(short2); +double2 __ovld __cnfn convert_double2_rtp(uchar2); +double2 __ovld __cnfn convert_double2_rtp(uint2); +double2 __ovld __cnfn convert_double2_rtp(ulong2); +double2 __ovld __cnfn convert_double2_rtp(ushort2); +double2 __ovld __cnfn convert_double2_rtz(char2); +double2 __ovld __cnfn convert_double2_rtz(double2); +double2 __ovld __cnfn convert_double2_rtz(float2); +double2 __ovld __cnfn convert_double2_rtz(int2); +double2 __ovld __cnfn convert_double2_rtz(long2); +double2 __ovld __cnfn convert_double2_rtz(short2); +double2 __ovld __cnfn convert_double2_rtz(uchar2); +double2 __ovld __cnfn convert_double2_rtz(uint2); +double2 __ovld __cnfn convert_double2_rtz(ulong2); +double2 __ovld __cnfn convert_double2_rtz(ushort2); +double3 __ovld __cnfn convert_double3(char3); +double3 __ovld __cnfn convert_double3(double3); +double3 __ovld __cnfn convert_double3(float3); +double3 __ovld __cnfn convert_double3(int3); +double3 __ovld __cnfn convert_double3(long3); +double3 __ovld __cnfn convert_double3(short3); +double3 __ovld __cnfn convert_double3(uchar3); +double3 __ovld __cnfn convert_double3(uint3); +double3 __ovld __cnfn convert_double3(ulong3); +double3 __ovld __cnfn convert_double3(ushort3); +double3 __ovld __cnfn convert_double3_rte(char3); +double3 __ovld __cnfn convert_double3_rte(double3); +double3 __ovld __cnfn convert_double3_rte(float3); +double3 __ovld __cnfn convert_double3_rte(int3); +double3 __ovld __cnfn convert_double3_rte(long3); +double3 __ovld __cnfn convert_double3_rte(short3); +double3 __ovld __cnfn convert_double3_rte(uchar3); +double3 __ovld __cnfn convert_double3_rte(uint3); +double3 __ovld __cnfn convert_double3_rte(ulong3); +double3 __ovld __cnfn convert_double3_rte(ushort3); +double3 __ovld __cnfn convert_double3_rtn(char3); +double3 __ovld __cnfn convert_double3_rtn(double3); +double3 __ovld __cnfn convert_double3_rtn(float3); +double3 __ovld __cnfn convert_double3_rtn(int3); +double3 __ovld __cnfn convert_double3_rtn(long3); +double3 __ovld __cnfn convert_double3_rtn(short3); +double3 __ovld __cnfn convert_double3_rtn(uchar3); +double3 __ovld __cnfn convert_double3_rtn(uint3); +double3 __ovld __cnfn convert_double3_rtn(ulong3); +double3 __ovld __cnfn convert_double3_rtn(ushort3); +double3 __ovld __cnfn convert_double3_rtp(char3); +double3 __ovld __cnfn convert_double3_rtp(double3); +double3 __ovld __cnfn convert_double3_rtp(float3); +double3 __ovld __cnfn convert_double3_rtp(int3); +double3 __ovld __cnfn convert_double3_rtp(long3); +double3 __ovld __cnfn convert_double3_rtp(short3); +double3 __ovld __cnfn convert_double3_rtp(uchar3); +double3 __ovld __cnfn convert_double3_rtp(uint3); +double3 __ovld __cnfn convert_double3_rtp(ulong3); +double3 __ovld __cnfn convert_double3_rtp(ushort3); +double3 __ovld __cnfn convert_double3_rtz(char3); +double3 __ovld __cnfn convert_double3_rtz(double3); +double3 __ovld __cnfn convert_double3_rtz(float3); +double3 __ovld __cnfn convert_double3_rtz(int3); +double3 __ovld __cnfn convert_double3_rtz(long3); +double3 __ovld __cnfn convert_double3_rtz(short3); +double3 __ovld __cnfn convert_double3_rtz(uchar3); +double3 __ovld __cnfn convert_double3_rtz(uint3); +double3 __ovld __cnfn convert_double3_rtz(ulong3); +double3 __ovld __cnfn convert_double3_rtz(ushort3); +double4 __ovld __cnfn convert_double4(char4); +double4 __ovld __cnfn 
convert_double4(double4); +double4 __ovld __cnfn convert_double4(float4); +double4 __ovld __cnfn convert_double4(int4); +double4 __ovld __cnfn convert_double4(long4); +double4 __ovld __cnfn convert_double4(short4); +double4 __ovld __cnfn convert_double4(uchar4); +double4 __ovld __cnfn convert_double4(uint4); +double4 __ovld __cnfn convert_double4(ulong4); +double4 __ovld __cnfn convert_double4(ushort4); +double4 __ovld __cnfn convert_double4_rte(char4); +double4 __ovld __cnfn convert_double4_rte(double4); +double4 __ovld __cnfn convert_double4_rte(float4); +double4 __ovld __cnfn convert_double4_rte(int4); +double4 __ovld __cnfn convert_double4_rte(long4); +double4 __ovld __cnfn convert_double4_rte(short4); +double4 __ovld __cnfn convert_double4_rte(uchar4); +double4 __ovld __cnfn convert_double4_rte(uint4); +double4 __ovld __cnfn convert_double4_rte(ulong4); +double4 __ovld __cnfn convert_double4_rte(ushort4); +double4 __ovld __cnfn convert_double4_rtn(char4); +double4 __ovld __cnfn convert_double4_rtn(double4); +double4 __ovld __cnfn convert_double4_rtn(float4); +double4 __ovld __cnfn convert_double4_rtn(int4); +double4 __ovld __cnfn convert_double4_rtn(long4); +double4 __ovld __cnfn convert_double4_rtn(short4); +double4 __ovld __cnfn convert_double4_rtn(uchar4); +double4 __ovld __cnfn convert_double4_rtn(uint4); +double4 __ovld __cnfn convert_double4_rtn(ulong4); +double4 __ovld __cnfn convert_double4_rtn(ushort4); +double4 __ovld __cnfn convert_double4_rtp(char4); +double4 __ovld __cnfn convert_double4_rtp(double4); +double4 __ovld __cnfn convert_double4_rtp(float4); +double4 __ovld __cnfn convert_double4_rtp(int4); +double4 __ovld __cnfn convert_double4_rtp(long4); +double4 __ovld __cnfn convert_double4_rtp(short4); +double4 __ovld __cnfn convert_double4_rtp(uchar4); +double4 __ovld __cnfn convert_double4_rtp(uint4); +double4 __ovld __cnfn convert_double4_rtp(ulong4); +double4 __ovld __cnfn convert_double4_rtp(ushort4); +double4 __ovld __cnfn convert_double4_rtz(char4); +double4 __ovld __cnfn convert_double4_rtz(double4); +double4 __ovld __cnfn convert_double4_rtz(float4); +double4 __ovld __cnfn convert_double4_rtz(int4); +double4 __ovld __cnfn convert_double4_rtz(long4); +double4 __ovld __cnfn convert_double4_rtz(short4); +double4 __ovld __cnfn convert_double4_rtz(uchar4); +double4 __ovld __cnfn convert_double4_rtz(uint4); +double4 __ovld __cnfn convert_double4_rtz(ulong4); +double4 __ovld __cnfn convert_double4_rtz(ushort4); +double8 __ovld __cnfn convert_double8(char8); +double8 __ovld __cnfn convert_double8(double8); +double8 __ovld __cnfn convert_double8(float8); +double8 __ovld __cnfn convert_double8(int8); +double8 __ovld __cnfn convert_double8(long8); +double8 __ovld __cnfn convert_double8(short8); +double8 __ovld __cnfn convert_double8(uchar8); +double8 __ovld __cnfn convert_double8(uint8); +double8 __ovld __cnfn convert_double8(ulong8); +double8 __ovld __cnfn convert_double8(ushort8); +double8 __ovld __cnfn convert_double8_rte(char8); +double8 __ovld __cnfn convert_double8_rte(double8); +double8 __ovld __cnfn convert_double8_rte(float8); +double8 __ovld __cnfn convert_double8_rte(int8); +double8 __ovld __cnfn convert_double8_rte(long8); +double8 __ovld __cnfn convert_double8_rte(short8); +double8 __ovld __cnfn convert_double8_rte(uchar8); +double8 __ovld __cnfn convert_double8_rte(uint8); +double8 __ovld __cnfn convert_double8_rte(ulong8); +double8 __ovld __cnfn convert_double8_rte(ushort8); +double8 __ovld __cnfn convert_double8_rtn(char8); +double8 __ovld __cnfn 
convert_double8_rtn(double8); +double8 __ovld __cnfn convert_double8_rtn(float8); +double8 __ovld __cnfn convert_double8_rtn(int8); +double8 __ovld __cnfn convert_double8_rtn(long8); +double8 __ovld __cnfn convert_double8_rtn(short8); +double8 __ovld __cnfn convert_double8_rtn(uchar8); +double8 __ovld __cnfn convert_double8_rtn(uint8); +double8 __ovld __cnfn convert_double8_rtn(ulong8); +double8 __ovld __cnfn convert_double8_rtn(ushort8); +double8 __ovld __cnfn convert_double8_rtp(char8); +double8 __ovld __cnfn convert_double8_rtp(double8); +double8 __ovld __cnfn convert_double8_rtp(float8); +double8 __ovld __cnfn convert_double8_rtp(int8); +double8 __ovld __cnfn convert_double8_rtp(long8); +double8 __ovld __cnfn convert_double8_rtp(short8); +double8 __ovld __cnfn convert_double8_rtp(uchar8); +double8 __ovld __cnfn convert_double8_rtp(uint8); +double8 __ovld __cnfn convert_double8_rtp(ulong8); +double8 __ovld __cnfn convert_double8_rtp(ushort8); +double8 __ovld __cnfn convert_double8_rtz(char8); +double8 __ovld __cnfn convert_double8_rtz(double8); +double8 __ovld __cnfn convert_double8_rtz(float8); +double8 __ovld __cnfn convert_double8_rtz(int8); +double8 __ovld __cnfn convert_double8_rtz(long8); +double8 __ovld __cnfn convert_double8_rtz(short8); +double8 __ovld __cnfn convert_double8_rtz(uchar8); +double8 __ovld __cnfn convert_double8_rtz(uint8); +double8 __ovld __cnfn convert_double8_rtz(ulong8); +double8 __ovld __cnfn convert_double8_rtz(ushort8); +double16 __ovld __cnfn convert_double16(char16); +double16 __ovld __cnfn convert_double16(double16); +double16 __ovld __cnfn convert_double16(float16); +double16 __ovld __cnfn convert_double16(int16); +double16 __ovld __cnfn convert_double16(long16); +double16 __ovld __cnfn convert_double16(short16); +double16 __ovld __cnfn convert_double16(uchar16); +double16 __ovld __cnfn convert_double16(uint16); +double16 __ovld __cnfn convert_double16(ulong16); +double16 __ovld __cnfn convert_double16(ushort16); +double16 __ovld __cnfn convert_double16_rte(char16); +double16 __ovld __cnfn convert_double16_rte(double16); +double16 __ovld __cnfn convert_double16_rte(float16); +double16 __ovld __cnfn convert_double16_rte(int16); +double16 __ovld __cnfn convert_double16_rte(long16); +double16 __ovld __cnfn convert_double16_rte(short16); +double16 __ovld __cnfn convert_double16_rte(uchar16); +double16 __ovld __cnfn convert_double16_rte(uint16); +double16 __ovld __cnfn convert_double16_rte(ulong16); +double16 __ovld __cnfn convert_double16_rte(ushort16); +double16 __ovld __cnfn convert_double16_rtn(char16); +double16 __ovld __cnfn convert_double16_rtn(double16); +double16 __ovld __cnfn convert_double16_rtn(float16); +double16 __ovld __cnfn convert_double16_rtn(int16); +double16 __ovld __cnfn convert_double16_rtn(long16); +double16 __ovld __cnfn convert_double16_rtn(short16); +double16 __ovld __cnfn convert_double16_rtn(uchar16); +double16 __ovld __cnfn convert_double16_rtn(uint16); +double16 __ovld __cnfn convert_double16_rtn(ulong16); +double16 __ovld __cnfn convert_double16_rtn(ushort16); +double16 __ovld __cnfn convert_double16_rtp(char16); +double16 __ovld __cnfn convert_double16_rtp(double16); +double16 __ovld __cnfn convert_double16_rtp(float16); +double16 __ovld __cnfn convert_double16_rtp(int16); +double16 __ovld __cnfn convert_double16_rtp(long16); +double16 __ovld __cnfn convert_double16_rtp(short16); +double16 __ovld __cnfn convert_double16_rtp(uchar16); +double16 __ovld __cnfn convert_double16_rtp(uint16); +double16 __ovld __cnfn 
convert_double16_rtp(ulong16); +double16 __ovld __cnfn convert_double16_rtp(ushort16); +double16 __ovld __cnfn convert_double16_rtz(char16); +double16 __ovld __cnfn convert_double16_rtz(double16); +double16 __ovld __cnfn convert_double16_rtz(float16); +double16 __ovld __cnfn convert_double16_rtz(int16); +double16 __ovld __cnfn convert_double16_rtz(long16); +double16 __ovld __cnfn convert_double16_rtz(short16); +double16 __ovld __cnfn convert_double16_rtz(uchar16); +double16 __ovld __cnfn convert_double16_rtz(uint16); +double16 __ovld __cnfn convert_double16_rtz(ulong16); +double16 __ovld __cnfn convert_double16_rtz(ushort16); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +// Convert half types to non-double types. +uchar __ovld __cnfn convert_uchar(half); +uchar __ovld __cnfn convert_uchar_rte(half); +uchar __ovld __cnfn convert_uchar_rtp(half); +uchar __ovld __cnfn convert_uchar_rtn(half); +uchar __ovld __cnfn convert_uchar_rtz(half); +uchar __ovld __cnfn convert_uchar_sat(half); +uchar __ovld __cnfn convert_uchar_sat_rte(half); +uchar __ovld __cnfn convert_uchar_sat_rtp(half); +uchar __ovld __cnfn convert_uchar_sat_rtn(half); +uchar __ovld __cnfn convert_uchar_sat_rtz(half); +uchar2 __ovld __cnfn convert_uchar2(half2); +uchar2 __ovld __cnfn convert_uchar2_rte(half2); +uchar2 __ovld __cnfn convert_uchar2_rtp(half2); +uchar2 __ovld __cnfn convert_uchar2_rtn(half2); +uchar2 __ovld __cnfn convert_uchar2_rtz(half2); +uchar2 __ovld __cnfn convert_uchar2_sat(half2); +uchar2 __ovld __cnfn convert_uchar2_sat_rte(half2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtp(half2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtn(half2); +uchar2 __ovld __cnfn convert_uchar2_sat_rtz(half2); +uchar3 __ovld __cnfn convert_uchar3(half3); +uchar3 __ovld __cnfn convert_uchar3_rte(half3); +uchar3 __ovld __cnfn convert_uchar3_rtp(half3); +uchar3 __ovld __cnfn convert_uchar3_rtn(half3); +uchar3 __ovld __cnfn convert_uchar3_rtz(half3); +uchar3 __ovld __cnfn convert_uchar3_sat(half3); +uchar3 __ovld __cnfn convert_uchar3_sat_rte(half3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtp(half3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtn(half3); +uchar3 __ovld __cnfn convert_uchar3_sat_rtz(half3); +uchar4 __ovld __cnfn convert_uchar4(half4); +uchar4 __ovld __cnfn convert_uchar4_rte(half4); +uchar4 __ovld __cnfn convert_uchar4_rtp(half4); +uchar4 __ovld __cnfn convert_uchar4_rtn(half4); +uchar4 __ovld __cnfn convert_uchar4_rtz(half4); +uchar4 __ovld __cnfn convert_uchar4_sat(half4); +uchar4 __ovld __cnfn convert_uchar4_sat_rte(half4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtp(half4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtn(half4); +uchar4 __ovld __cnfn convert_uchar4_sat_rtz(half4); +uchar8 __ovld __cnfn convert_uchar8(half8); +uchar8 __ovld __cnfn convert_uchar8_rte(half8); +uchar8 __ovld __cnfn convert_uchar8_rtp(half8); +uchar8 __ovld __cnfn convert_uchar8_rtn(half8); +uchar8 __ovld __cnfn convert_uchar8_rtz(half8); +uchar8 __ovld __cnfn convert_uchar8_sat(half8); +uchar8 __ovld __cnfn convert_uchar8_sat_rte(half8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtp(half8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtn(half8); +uchar8 __ovld __cnfn convert_uchar8_sat_rtz(half8); +uchar16 __ovld __cnfn convert_uchar16(half16); +uchar16 __ovld __cnfn convert_uchar16_rte(half16); +uchar16 __ovld __cnfn convert_uchar16_rtp(half16); +uchar16 __ovld __cnfn convert_uchar16_rtn(half16); +uchar16 __ovld __cnfn convert_uchar16_rtz(half16); +uchar16 __ovld __cnfn convert_uchar16_sat(half16); +uchar16 __ovld __cnfn 
convert_uchar16_sat_rte(half16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtp(half16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtn(half16); +uchar16 __ovld __cnfn convert_uchar16_sat_rtz(half16); +ushort __ovld __cnfn convert_ushort(half); +ushort __ovld __cnfn convert_ushort_rte(half); +ushort __ovld __cnfn convert_ushort_rtp(half); +ushort __ovld __cnfn convert_ushort_rtn(half); +ushort __ovld __cnfn convert_ushort_rtz(half); +ushort __ovld __cnfn convert_ushort_sat(half); +ushort __ovld __cnfn convert_ushort_sat_rte(half); +ushort __ovld __cnfn convert_ushort_sat_rtp(half); +ushort __ovld __cnfn convert_ushort_sat_rtn(half); +ushort __ovld __cnfn convert_ushort_sat_rtz(half); +ushort2 __ovld __cnfn convert_ushort2(half2); +ushort2 __ovld __cnfn convert_ushort2_rte(half2); +ushort2 __ovld __cnfn convert_ushort2_rtp(half2); +ushort2 __ovld __cnfn convert_ushort2_rtn(half2); +ushort2 __ovld __cnfn convert_ushort2_rtz(half2); +ushort2 __ovld __cnfn convert_ushort2_sat(half2); +ushort2 __ovld __cnfn convert_ushort2_sat_rte(half2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtp(half2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtn(half2); +ushort2 __ovld __cnfn convert_ushort2_sat_rtz(half2); +ushort3 __ovld __cnfn convert_ushort3(half3); +ushort3 __ovld __cnfn convert_ushort3_rte(half3); +ushort3 __ovld __cnfn convert_ushort3_rtp(half3); +ushort3 __ovld __cnfn convert_ushort3_rtn(half3); +ushort3 __ovld __cnfn convert_ushort3_rtz(half3); +ushort3 __ovld __cnfn convert_ushort3_sat(half3); +ushort3 __ovld __cnfn convert_ushort3_sat_rte(half3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtp(half3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtn(half3); +ushort3 __ovld __cnfn convert_ushort3_sat_rtz(half3); +ushort4 __ovld __cnfn convert_ushort4(half4); +ushort4 __ovld __cnfn convert_ushort4_rte(half4); +ushort4 __ovld __cnfn convert_ushort4_rtp(half4); +ushort4 __ovld __cnfn convert_ushort4_rtn(half4); +ushort4 __ovld __cnfn convert_ushort4_rtz(half4); +ushort4 __ovld __cnfn convert_ushort4_sat(half4); +ushort4 __ovld __cnfn convert_ushort4_sat_rte(half4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtp(half4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtn(half4); +ushort4 __ovld __cnfn convert_ushort4_sat_rtz(half4); +ushort8 __ovld __cnfn convert_ushort8(half8); +ushort8 __ovld __cnfn convert_ushort8_rte(half8); +ushort8 __ovld __cnfn convert_ushort8_rtp(half8); +ushort8 __ovld __cnfn convert_ushort8_rtn(half8); +ushort8 __ovld __cnfn convert_ushort8_rtz(half8); +ushort8 __ovld __cnfn convert_ushort8_sat(half8); +ushort8 __ovld __cnfn convert_ushort8_sat_rte(half8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtp(half8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtn(half8); +ushort8 __ovld __cnfn convert_ushort8_sat_rtz(half8); +ushort16 __ovld __cnfn convert_ushort16(half16); +ushort16 __ovld __cnfn convert_ushort16_rte(half16); +ushort16 __ovld __cnfn convert_ushort16_rtp(half16); +ushort16 __ovld __cnfn convert_ushort16_rtn(half16); +ushort16 __ovld __cnfn convert_ushort16_rtz(half16); +ushort16 __ovld __cnfn convert_ushort16_sat(half16); +ushort16 __ovld __cnfn convert_ushort16_sat_rte(half16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtp(half16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtn(half16); +ushort16 __ovld __cnfn convert_ushort16_sat_rtz(half16); +uint __ovld __cnfn convert_uint(half); +uint __ovld __cnfn convert_uint_rte(half); +uint __ovld __cnfn convert_uint_rtp(half); +uint __ovld __cnfn convert_uint_rtn(half); +uint __ovld __cnfn convert_uint_rtz(half); +uint 
__ovld __cnfn convert_uint_sat(half); +uint __ovld __cnfn convert_uint_sat_rte(half); +uint __ovld __cnfn convert_uint_sat_rtp(half); +uint __ovld __cnfn convert_uint_sat_rtn(half); +uint __ovld __cnfn convert_uint_sat_rtz(half); +uint2 __ovld __cnfn convert_uint2(half2); +uint2 __ovld __cnfn convert_uint2_rte(half2); +uint2 __ovld __cnfn convert_uint2_rtp(half2); +uint2 __ovld __cnfn convert_uint2_rtn(half2); +uint2 __ovld __cnfn convert_uint2_rtz(half2); +uint2 __ovld __cnfn convert_uint2_sat(half2); +uint2 __ovld __cnfn convert_uint2_sat_rte(half2); +uint2 __ovld __cnfn convert_uint2_sat_rtp(half2); +uint2 __ovld __cnfn convert_uint2_sat_rtn(half2); +uint2 __ovld __cnfn convert_uint2_sat_rtz(half2); +uint3 __ovld __cnfn convert_uint3(half3); +uint3 __ovld __cnfn convert_uint3_rte(half3); +uint3 __ovld __cnfn convert_uint3_rtp(half3); +uint3 __ovld __cnfn convert_uint3_rtn(half3); +uint3 __ovld __cnfn convert_uint3_rtz(half3); +uint3 __ovld __cnfn convert_uint3_sat(half3); +uint3 __ovld __cnfn convert_uint3_sat_rte(half3); +uint3 __ovld __cnfn convert_uint3_sat_rtp(half3); +uint3 __ovld __cnfn convert_uint3_sat_rtn(half3); +uint3 __ovld __cnfn convert_uint3_sat_rtz(half3); +uint4 __ovld __cnfn convert_uint4(half4); +uint4 __ovld __cnfn convert_uint4_rte(half4); +uint4 __ovld __cnfn convert_uint4_rtp(half4); +uint4 __ovld __cnfn convert_uint4_rtn(half4); +uint4 __ovld __cnfn convert_uint4_rtz(half4); +uint4 __ovld __cnfn convert_uint4_sat(half4); +uint4 __ovld __cnfn convert_uint4_sat_rte(half4); +uint4 __ovld __cnfn convert_uint4_sat_rtp(half4); +uint4 __ovld __cnfn convert_uint4_sat_rtn(half4); +uint4 __ovld __cnfn convert_uint4_sat_rtz(half4); +uint8 __ovld __cnfn convert_uint8(half8); +uint8 __ovld __cnfn convert_uint8_rte(half8); +uint8 __ovld __cnfn convert_uint8_rtp(half8); +uint8 __ovld __cnfn convert_uint8_rtn(half8); +uint8 __ovld __cnfn convert_uint8_rtz(half8); +uint8 __ovld __cnfn convert_uint8_sat(half8); +uint8 __ovld __cnfn convert_uint8_sat_rte(half8); +uint8 __ovld __cnfn convert_uint8_sat_rtp(half8); +uint8 __ovld __cnfn convert_uint8_sat_rtn(half8); +uint8 __ovld __cnfn convert_uint8_sat_rtz(half8); +uint16 __ovld __cnfn convert_uint16(half16); +uint16 __ovld __cnfn convert_uint16_rte(half16); +uint16 __ovld __cnfn convert_uint16_rtp(half16); +uint16 __ovld __cnfn convert_uint16_rtn(half16); +uint16 __ovld __cnfn convert_uint16_rtz(half16); +uint16 __ovld __cnfn convert_uint16_sat(half16); +uint16 __ovld __cnfn convert_uint16_sat_rte(half16); +uint16 __ovld __cnfn convert_uint16_sat_rtp(half16); +uint16 __ovld __cnfn convert_uint16_sat_rtn(half16); +uint16 __ovld __cnfn convert_uint16_sat_rtz(half16); +ulong __ovld __cnfn convert_ulong(half); +ulong __ovld __cnfn convert_ulong_rte(half); +ulong __ovld __cnfn convert_ulong_rtp(half); +ulong __ovld __cnfn convert_ulong_rtn(half); +ulong __ovld __cnfn convert_ulong_rtz(half); +ulong __ovld __cnfn convert_ulong_sat(half); +ulong __ovld __cnfn convert_ulong_sat_rte(half); +ulong __ovld __cnfn convert_ulong_sat_rtp(half); +ulong __ovld __cnfn convert_ulong_sat_rtn(half); +ulong __ovld __cnfn convert_ulong_sat_rtz(half); +ulong2 __ovld __cnfn convert_ulong2(half2); +ulong2 __ovld __cnfn convert_ulong2_rte(half2); +ulong2 __ovld __cnfn convert_ulong2_rtp(half2); +ulong2 __ovld __cnfn convert_ulong2_rtn(half2); +ulong2 __ovld __cnfn convert_ulong2_rtz(half2); +ulong2 __ovld __cnfn convert_ulong2_sat(half2); +ulong2 __ovld __cnfn convert_ulong2_sat_rte(half2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtp(half2); +ulong2 __ovld 
__cnfn convert_ulong2_sat_rtn(half2); +ulong2 __ovld __cnfn convert_ulong2_sat_rtz(half2); +ulong3 __ovld __cnfn convert_ulong3(half3); +ulong3 __ovld __cnfn convert_ulong3_rte(half3); +ulong3 __ovld __cnfn convert_ulong3_rtp(half3); +ulong3 __ovld __cnfn convert_ulong3_rtn(half3); +ulong3 __ovld __cnfn convert_ulong3_rtz(half3); +ulong3 __ovld __cnfn convert_ulong3_sat(half3); +ulong3 __ovld __cnfn convert_ulong3_sat_rte(half3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtp(half3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtn(half3); +ulong3 __ovld __cnfn convert_ulong3_sat_rtz(half3); +ulong4 __ovld __cnfn convert_ulong4(half4); +ulong4 __ovld __cnfn convert_ulong4_rte(half4); +ulong4 __ovld __cnfn convert_ulong4_rtp(half4); +ulong4 __ovld __cnfn convert_ulong4_rtn(half4); +ulong4 __ovld __cnfn convert_ulong4_rtz(half4); +ulong4 __ovld __cnfn convert_ulong4_sat(half4); +ulong4 __ovld __cnfn convert_ulong4_sat_rte(half4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtp(half4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtn(half4); +ulong4 __ovld __cnfn convert_ulong4_sat_rtz(half4); +ulong8 __ovld __cnfn convert_ulong8(half8); +ulong8 __ovld __cnfn convert_ulong8_rte(half8); +ulong8 __ovld __cnfn convert_ulong8_rtp(half8); +ulong8 __ovld __cnfn convert_ulong8_rtn(half8); +ulong8 __ovld __cnfn convert_ulong8_rtz(half8); +ulong8 __ovld __cnfn convert_ulong8_sat(half8); +ulong8 __ovld __cnfn convert_ulong8_sat_rte(half8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtp(half8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtn(half8); +ulong8 __ovld __cnfn convert_ulong8_sat_rtz(half8); +ulong16 __ovld __cnfn convert_ulong16(half16); +ulong16 __ovld __cnfn convert_ulong16_rte(half16); +ulong16 __ovld __cnfn convert_ulong16_rtp(half16); +ulong16 __ovld __cnfn convert_ulong16_rtn(half16); +ulong16 __ovld __cnfn convert_ulong16_rtz(half16); +ulong16 __ovld __cnfn convert_ulong16_sat(half16); +ulong16 __ovld __cnfn convert_ulong16_sat_rte(half16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtp(half16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtn(half16); +ulong16 __ovld __cnfn convert_ulong16_sat_rtz(half16); +char __ovld __cnfn convert_char(half); +char __ovld __cnfn convert_char_rte(half); +char __ovld __cnfn convert_char_rtp(half); +char __ovld __cnfn convert_char_rtn(half); +char __ovld __cnfn convert_char_rtz(half); +char __ovld __cnfn convert_char_sat(half); +char __ovld __cnfn convert_char_sat_rte(half); +char __ovld __cnfn convert_char_sat_rtp(half); +char __ovld __cnfn convert_char_sat_rtn(half); +char __ovld __cnfn convert_char_sat_rtz(half); +char2 __ovld __cnfn convert_char2(half2); +char2 __ovld __cnfn convert_char2_rte(half2); +char2 __ovld __cnfn convert_char2_rtp(half2); +char2 __ovld __cnfn convert_char2_rtn(half2); +char2 __ovld __cnfn convert_char2_rtz(half2); +char2 __ovld __cnfn convert_char2_sat(half2); +char2 __ovld __cnfn convert_char2_sat_rte(half2); +char2 __ovld __cnfn convert_char2_sat_rtp(half2); +char2 __ovld __cnfn convert_char2_sat_rtn(half2); +char2 __ovld __cnfn convert_char2_sat_rtz(half2); +char3 __ovld __cnfn convert_char3(half3); +char3 __ovld __cnfn convert_char3_rte(half3); +char3 __ovld __cnfn convert_char3_rtp(half3); +char3 __ovld __cnfn convert_char3_rtn(half3); +char3 __ovld __cnfn convert_char3_rtz(half3); +char3 __ovld __cnfn convert_char3_sat(half3); +char3 __ovld __cnfn convert_char3_sat_rte(half3); +char3 __ovld __cnfn convert_char3_sat_rtp(half3); +char3 __ovld __cnfn convert_char3_sat_rtn(half3); +char3 __ovld __cnfn convert_char3_sat_rtz(half3); +char4 
__ovld __cnfn convert_char4(half4); +char4 __ovld __cnfn convert_char4_rte(half4); +char4 __ovld __cnfn convert_char4_rtp(half4); +char4 __ovld __cnfn convert_char4_rtn(half4); +char4 __ovld __cnfn convert_char4_rtz(half4); +char4 __ovld __cnfn convert_char4_sat(half4); +char4 __ovld __cnfn convert_char4_sat_rte(half4); +char4 __ovld __cnfn convert_char4_sat_rtp(half4); +char4 __ovld __cnfn convert_char4_sat_rtn(half4); +char4 __ovld __cnfn convert_char4_sat_rtz(half4); +char8 __ovld __cnfn convert_char8(half8); +char8 __ovld __cnfn convert_char8_rte(half8); +char8 __ovld __cnfn convert_char8_rtp(half8); +char8 __ovld __cnfn convert_char8_rtn(half8); +char8 __ovld __cnfn convert_char8_rtz(half8); +char8 __ovld __cnfn convert_char8_sat(half8); +char8 __ovld __cnfn convert_char8_sat_rte(half8); +char8 __ovld __cnfn convert_char8_sat_rtp(half8); +char8 __ovld __cnfn convert_char8_sat_rtn(half8); +char8 __ovld __cnfn convert_char8_sat_rtz(half8); +char16 __ovld __cnfn convert_char16(half16); +char16 __ovld __cnfn convert_char16_rte(half16); +char16 __ovld __cnfn convert_char16_rtp(half16); +char16 __ovld __cnfn convert_char16_rtn(half16); +char16 __ovld __cnfn convert_char16_rtz(half16); +char16 __ovld __cnfn convert_char16_sat(half16); +char16 __ovld __cnfn convert_char16_sat_rte(half16); +char16 __ovld __cnfn convert_char16_sat_rtp(half16); +char16 __ovld __cnfn convert_char16_sat_rtn(half16); +char16 __ovld __cnfn convert_char16_sat_rtz(half16); +short __ovld __cnfn convert_short(half); +short __ovld __cnfn convert_short_rte(half); +short __ovld __cnfn convert_short_rtp(half); +short __ovld __cnfn convert_short_rtn(half); +short __ovld __cnfn convert_short_rtz(half); +short __ovld __cnfn convert_short_sat(half); +short __ovld __cnfn convert_short_sat_rte(half); +short __ovld __cnfn convert_short_sat_rtp(half); +short __ovld __cnfn convert_short_sat_rtn(half); +short __ovld __cnfn convert_short_sat_rtz(half); +short2 __ovld __cnfn convert_short2(half2); +short2 __ovld __cnfn convert_short2_rte(half2); +short2 __ovld __cnfn convert_short2_rtp(half2); +short2 __ovld __cnfn convert_short2_rtn(half2); +short2 __ovld __cnfn convert_short2_rtz(half2); +short2 __ovld __cnfn convert_short2_sat(half2); +short2 __ovld __cnfn convert_short2_sat_rte(half2); +short2 __ovld __cnfn convert_short2_sat_rtp(half2); +short2 __ovld __cnfn convert_short2_sat_rtn(half2); +short2 __ovld __cnfn convert_short2_sat_rtz(half2); +short3 __ovld __cnfn convert_short3(half3); +short3 __ovld __cnfn convert_short3_rte(half3); +short3 __ovld __cnfn convert_short3_rtp(half3); +short3 __ovld __cnfn convert_short3_rtn(half3); +short3 __ovld __cnfn convert_short3_rtz(half3); +short3 __ovld __cnfn convert_short3_sat(half3); +short3 __ovld __cnfn convert_short3_sat_rte(half3); +short3 __ovld __cnfn convert_short3_sat_rtp(half3); +short3 __ovld __cnfn convert_short3_sat_rtn(half3); +short3 __ovld __cnfn convert_short3_sat_rtz(half3); +short4 __ovld __cnfn convert_short4(half4); +short4 __ovld __cnfn convert_short4_rte(half4); +short4 __ovld __cnfn convert_short4_rtp(half4); +short4 __ovld __cnfn convert_short4_rtn(half4); +short4 __ovld __cnfn convert_short4_rtz(half4); +short4 __ovld __cnfn convert_short4_sat(half4); +short4 __ovld __cnfn convert_short4_sat_rte(half4); +short4 __ovld __cnfn convert_short4_sat_rtp(half4); +short4 __ovld __cnfn convert_short4_sat_rtn(half4); +short4 __ovld __cnfn convert_short4_sat_rtz(half4); +short8 __ovld __cnfn convert_short8(half8); +short8 __ovld __cnfn convert_short8_rte(half8); +short8 
__ovld __cnfn convert_short8_rtp(half8); +short8 __ovld __cnfn convert_short8_rtn(half8); +short8 __ovld __cnfn convert_short8_rtz(half8); +short8 __ovld __cnfn convert_short8_sat(half8); +short8 __ovld __cnfn convert_short8_sat_rte(half8); +short8 __ovld __cnfn convert_short8_sat_rtp(half8); +short8 __ovld __cnfn convert_short8_sat_rtn(half8); +short8 __ovld __cnfn convert_short8_sat_rtz(half8); +short16 __ovld __cnfn convert_short16(half16); +short16 __ovld __cnfn convert_short16_rte(half16); +short16 __ovld __cnfn convert_short16_rtp(half16); +short16 __ovld __cnfn convert_short16_rtn(half16); +short16 __ovld __cnfn convert_short16_rtz(half16); +short16 __ovld __cnfn convert_short16_sat(half16); +short16 __ovld __cnfn convert_short16_sat_rte(half16); +short16 __ovld __cnfn convert_short16_sat_rtp(half16); +short16 __ovld __cnfn convert_short16_sat_rtn(half16); +short16 __ovld __cnfn convert_short16_sat_rtz(half16); +int __ovld __cnfn convert_int(half); +int __ovld __cnfn convert_int_rte(half); +int __ovld __cnfn convert_int_rtp(half); +int __ovld __cnfn convert_int_rtn(half); +int __ovld __cnfn convert_int_rtz(half); +int __ovld __cnfn convert_int_sat(half); +int __ovld __cnfn convert_int_sat_rte(half); +int __ovld __cnfn convert_int_sat_rtp(half); +int __ovld __cnfn convert_int_sat_rtn(half); +int __ovld __cnfn convert_int_sat_rtz(half); +int2 __ovld __cnfn convert_int2(half2); +int2 __ovld __cnfn convert_int2_rte(half2); +int2 __ovld __cnfn convert_int2_rtp(half2); +int2 __ovld __cnfn convert_int2_rtn(half2); +int2 __ovld __cnfn convert_int2_rtz(half2); +int2 __ovld __cnfn convert_int2_sat(half2); +int2 __ovld __cnfn convert_int2_sat_rte(half2); +int2 __ovld __cnfn convert_int2_sat_rtp(half2); +int2 __ovld __cnfn convert_int2_sat_rtn(half2); +int2 __ovld __cnfn convert_int2_sat_rtz(half2); +int3 __ovld __cnfn convert_int3(half3); +int3 __ovld __cnfn convert_int3_rte(half3); +int3 __ovld __cnfn convert_int3_rtp(half3); +int3 __ovld __cnfn convert_int3_rtn(half3); +int3 __ovld __cnfn convert_int3_rtz(half3); +int3 __ovld __cnfn convert_int3_sat(half3); +int3 __ovld __cnfn convert_int3_sat_rte(half3); +int3 __ovld __cnfn convert_int3_sat_rtp(half3); +int3 __ovld __cnfn convert_int3_sat_rtn(half3); +int3 __ovld __cnfn convert_int3_sat_rtz(half3); +int4 __ovld __cnfn convert_int4(half4); +int4 __ovld __cnfn convert_int4_rte(half4); +int4 __ovld __cnfn convert_int4_rtp(half4); +int4 __ovld __cnfn convert_int4_rtn(half4); +int4 __ovld __cnfn convert_int4_rtz(half4); +int4 __ovld __cnfn convert_int4_sat(half4); +int4 __ovld __cnfn convert_int4_sat_rte(half4); +int4 __ovld __cnfn convert_int4_sat_rtp(half4); +int4 __ovld __cnfn convert_int4_sat_rtn(half4); +int4 __ovld __cnfn convert_int4_sat_rtz(half4); +int8 __ovld __cnfn convert_int8(half8); +int8 __ovld __cnfn convert_int8_rte(half8); +int8 __ovld __cnfn convert_int8_rtp(half8); +int8 __ovld __cnfn convert_int8_rtn(half8); +int8 __ovld __cnfn convert_int8_rtz(half8); +int8 __ovld __cnfn convert_int8_sat(half8); +int8 __ovld __cnfn convert_int8_sat_rte(half8); +int8 __ovld __cnfn convert_int8_sat_rtp(half8); +int8 __ovld __cnfn convert_int8_sat_rtn(half8); +int8 __ovld __cnfn convert_int8_sat_rtz(half8); +int16 __ovld __cnfn convert_int16(half16); +int16 __ovld __cnfn convert_int16_rte(half16); +int16 __ovld __cnfn convert_int16_rtp(half16); +int16 __ovld __cnfn convert_int16_rtn(half16); +int16 __ovld __cnfn convert_int16_rtz(half16); +int16 __ovld __cnfn convert_int16_sat(half16); +int16 __ovld __cnfn convert_int16_sat_rte(half16); +int16 
__ovld __cnfn convert_int16_sat_rtp(half16); +int16 __ovld __cnfn convert_int16_sat_rtn(half16); +int16 __ovld __cnfn convert_int16_sat_rtz(half16); +long __ovld __cnfn convert_long(half); +long __ovld __cnfn convert_long_rte(half); +long __ovld __cnfn convert_long_rtp(half); +long __ovld __cnfn convert_long_rtn(half); +long __ovld __cnfn convert_long_rtz(half); +long __ovld __cnfn convert_long_sat(half); +long __ovld __cnfn convert_long_sat_rte(half); +long __ovld __cnfn convert_long_sat_rtp(half); +long __ovld __cnfn convert_long_sat_rtn(half); +long __ovld __cnfn convert_long_sat_rtz(half); +long2 __ovld __cnfn convert_long2(half2); +long2 __ovld __cnfn convert_long2_rte(half2); +long2 __ovld __cnfn convert_long2_rtp(half2); +long2 __ovld __cnfn convert_long2_rtn(half2); +long2 __ovld __cnfn convert_long2_rtz(half2); +long2 __ovld __cnfn convert_long2_sat(half2); +long2 __ovld __cnfn convert_long2_sat_rte(half2); +long2 __ovld __cnfn convert_long2_sat_rtp(half2); +long2 __ovld __cnfn convert_long2_sat_rtn(half2); +long2 __ovld __cnfn convert_long2_sat_rtz(half2); +long3 __ovld __cnfn convert_long3(half3); +long3 __ovld __cnfn convert_long3_rte(half3); +long3 __ovld __cnfn convert_long3_rtp(half3); +long3 __ovld __cnfn convert_long3_rtn(half3); +long3 __ovld __cnfn convert_long3_rtz(half3); +long3 __ovld __cnfn convert_long3_sat(half3); +long3 __ovld __cnfn convert_long3_sat_rte(half3); +long3 __ovld __cnfn convert_long3_sat_rtp(half3); +long3 __ovld __cnfn convert_long3_sat_rtn(half3); +long3 __ovld __cnfn convert_long3_sat_rtz(half3); +long4 __ovld __cnfn convert_long4(half4); +long4 __ovld __cnfn convert_long4_rte(half4); +long4 __ovld __cnfn convert_long4_rtp(half4); +long4 __ovld __cnfn convert_long4_rtn(half4); +long4 __ovld __cnfn convert_long4_rtz(half4); +long4 __ovld __cnfn convert_long4_sat(half4); +long4 __ovld __cnfn convert_long4_sat_rte(half4); +long4 __ovld __cnfn convert_long4_sat_rtp(half4); +long4 __ovld __cnfn convert_long4_sat_rtn(half4); +long4 __ovld __cnfn convert_long4_sat_rtz(half4); +long8 __ovld __cnfn convert_long8(half8); +long8 __ovld __cnfn convert_long8_rte(half8); +long8 __ovld __cnfn convert_long8_rtp(half8); +long8 __ovld __cnfn convert_long8_rtn(half8); +long8 __ovld __cnfn convert_long8_rtz(half8); +long8 __ovld __cnfn convert_long8_sat(half8); +long8 __ovld __cnfn convert_long8_sat_rte(half8); +long8 __ovld __cnfn convert_long8_sat_rtp(half8); +long8 __ovld __cnfn convert_long8_sat_rtn(half8); +long8 __ovld __cnfn convert_long8_sat_rtz(half8); +long16 __ovld __cnfn convert_long16(half16); +long16 __ovld __cnfn convert_long16_rte(half16); +long16 __ovld __cnfn convert_long16_rtp(half16); +long16 __ovld __cnfn convert_long16_rtn(half16); +long16 __ovld __cnfn convert_long16_rtz(half16); +long16 __ovld __cnfn convert_long16_sat(half16); +long16 __ovld __cnfn convert_long16_sat_rte(half16); +long16 __ovld __cnfn convert_long16_sat_rtp(half16); +long16 __ovld __cnfn convert_long16_sat_rtn(half16); +long16 __ovld __cnfn convert_long16_sat_rtz(half16); +float __ovld __cnfn convert_float(half); +float __ovld __cnfn convert_float_rte(half); +float __ovld __cnfn convert_float_rtp(half); +float __ovld __cnfn convert_float_rtn(half); +float __ovld __cnfn convert_float_rtz(half); +float2 __ovld __cnfn convert_float2(half2); +float2 __ovld __cnfn convert_float2_rte(half2); +float2 __ovld __cnfn convert_float2_rtp(half2); +float2 __ovld __cnfn convert_float2_rtn(half2); +float2 __ovld __cnfn convert_float2_rtz(half2); +float3 __ovld __cnfn convert_float3(half3); 
+float3 __ovld __cnfn convert_float3_rte(half3); +float3 __ovld __cnfn convert_float3_rtp(half3); +float3 __ovld __cnfn convert_float3_rtn(half3); +float3 __ovld __cnfn convert_float3_rtz(half3); +float4 __ovld __cnfn convert_float4(half4); +float4 __ovld __cnfn convert_float4_rte(half4); +float4 __ovld __cnfn convert_float4_rtp(half4); +float4 __ovld __cnfn convert_float4_rtn(half4); +float4 __ovld __cnfn convert_float4_rtz(half4); +float8 __ovld __cnfn convert_float8(half8); +float8 __ovld __cnfn convert_float8_rte(half8); +float8 __ovld __cnfn convert_float8_rtp(half8); +float8 __ovld __cnfn convert_float8_rtn(half8); +float8 __ovld __cnfn convert_float8_rtz(half8); +float16 __ovld __cnfn convert_float16(half16); +float16 __ovld __cnfn convert_float16_rte(half16); +float16 __ovld __cnfn convert_float16_rtp(half16); +float16 __ovld __cnfn convert_float16_rtn(half16); +float16 __ovld __cnfn convert_float16_rtz(half16); + +// Convert non-double types to half types. +half __ovld __cnfn convert_half(uchar); +half __ovld __cnfn convert_half(ushort); +half __ovld __cnfn convert_half(uint); +half __ovld __cnfn convert_half(ulong); +half __ovld __cnfn convert_half(char); +half __ovld __cnfn convert_half(short); +half __ovld __cnfn convert_half(int); +half __ovld __cnfn convert_half(long); +half __ovld __cnfn convert_half(float); +half __ovld __cnfn convert_half(half); +half __ovld __cnfn convert_half_rte(uchar); +half __ovld __cnfn convert_half_rte(ushort); +half __ovld __cnfn convert_half_rte(uint); +half __ovld __cnfn convert_half_rte(ulong); +half __ovld __cnfn convert_half_rte(char); +half __ovld __cnfn convert_half_rte(short); +half __ovld __cnfn convert_half_rte(int); +half __ovld __cnfn convert_half_rte(long); +half __ovld __cnfn convert_half_rte(float); +half __ovld __cnfn convert_half_rte(half); +half __ovld __cnfn convert_half_rtp(uchar); +half __ovld __cnfn convert_half_rtp(ushort); +half __ovld __cnfn convert_half_rtp(uint); +half __ovld __cnfn convert_half_rtp(ulong); +half __ovld __cnfn convert_half_rtp(char); +half __ovld __cnfn convert_half_rtp(short); +half __ovld __cnfn convert_half_rtp(int); +half __ovld __cnfn convert_half_rtp(long); +half __ovld __cnfn convert_half_rtp(float); +half __ovld __cnfn convert_half_rtp(half); +half __ovld __cnfn convert_half_rtn(uchar); +half __ovld __cnfn convert_half_rtn(ushort); +half __ovld __cnfn convert_half_rtn(uint); +half __ovld __cnfn convert_half_rtn(ulong); +half __ovld __cnfn convert_half_rtn(char); +half __ovld __cnfn convert_half_rtn(short); +half __ovld __cnfn convert_half_rtn(int); +half __ovld __cnfn convert_half_rtn(long); +half __ovld __cnfn convert_half_rtn(float); +half __ovld __cnfn convert_half_rtn(half); +half __ovld __cnfn convert_half_rtz(uchar); +half __ovld __cnfn convert_half_rtz(ushort); +half __ovld __cnfn convert_half_rtz(uint); +half __ovld __cnfn convert_half_rtz(ulong); +half __ovld __cnfn convert_half_rtz(char); +half __ovld __cnfn convert_half_rtz(short); +half __ovld __cnfn convert_half_rtz(int); +half __ovld __cnfn convert_half_rtz(long); +half __ovld __cnfn convert_half_rtz(float); +half __ovld __cnfn convert_half_rtz(half); +half2 __ovld __cnfn convert_half2(char2); +half2 __ovld __cnfn convert_half2(uchar2); +half2 __ovld __cnfn convert_half2(short2); +half2 __ovld __cnfn convert_half2(ushort2); +half2 __ovld __cnfn convert_half2(int2); +half2 __ovld __cnfn convert_half2(uint2); +half2 __ovld __cnfn convert_half2(long2); +half2 __ovld __cnfn convert_half2(ulong2); +half2 __ovld __cnfn 
convert_half2(float2); +half2 __ovld __cnfn convert_half2(half2); +half2 __ovld __cnfn convert_half2_rte(char2); +half2 __ovld __cnfn convert_half2_rte(uchar2); +half2 __ovld __cnfn convert_half2_rte(short2); +half2 __ovld __cnfn convert_half2_rte(ushort2); +half2 __ovld __cnfn convert_half2_rte(int2); +half2 __ovld __cnfn convert_half2_rte(uint2); +half2 __ovld __cnfn convert_half2_rte(long2); +half2 __ovld __cnfn convert_half2_rte(ulong2); +half2 __ovld __cnfn convert_half2_rte(float2); +half2 __ovld __cnfn convert_half2_rte(half2); +half2 __ovld __cnfn convert_half2_rtp(char2); +half2 __ovld __cnfn convert_half2_rtp(uchar2); +half2 __ovld __cnfn convert_half2_rtp(short2); +half2 __ovld __cnfn convert_half2_rtp(ushort2); +half2 __ovld __cnfn convert_half2_rtp(int2); +half2 __ovld __cnfn convert_half2_rtp(uint2); +half2 __ovld __cnfn convert_half2_rtp(long2); +half2 __ovld __cnfn convert_half2_rtp(ulong2); +half2 __ovld __cnfn convert_half2_rtp(float2); +half2 __ovld __cnfn convert_half2_rtp(half2); +half2 __ovld __cnfn convert_half2_rtn(char2); +half2 __ovld __cnfn convert_half2_rtn(uchar2); +half2 __ovld __cnfn convert_half2_rtn(short2); +half2 __ovld __cnfn convert_half2_rtn(ushort2); +half2 __ovld __cnfn convert_half2_rtn(int2); +half2 __ovld __cnfn convert_half2_rtn(uint2); +half2 __ovld __cnfn convert_half2_rtn(long2); +half2 __ovld __cnfn convert_half2_rtn(ulong2); +half2 __ovld __cnfn convert_half2_rtn(float2); +half2 __ovld __cnfn convert_half2_rtn(half2); +half2 __ovld __cnfn convert_half2_rtz(char2); +half2 __ovld __cnfn convert_half2_rtz(uchar2); +half2 __ovld __cnfn convert_half2_rtz(short2); +half2 __ovld __cnfn convert_half2_rtz(ushort2); +half2 __ovld __cnfn convert_half2_rtz(int2); +half2 __ovld __cnfn convert_half2_rtz(uint2); +half2 __ovld __cnfn convert_half2_rtz(long2); +half2 __ovld __cnfn convert_half2_rtz(ulong2); +half2 __ovld __cnfn convert_half2_rtz(float2); +half2 __ovld __cnfn convert_half2_rtz(half2); +half3 __ovld __cnfn convert_half3(char3); +half3 __ovld __cnfn convert_half3(uchar3); +half3 __ovld __cnfn convert_half3(short3); +half3 __ovld __cnfn convert_half3(ushort3); +half3 __ovld __cnfn convert_half3(int3); +half3 __ovld __cnfn convert_half3(uint3); +half3 __ovld __cnfn convert_half3(long3); +half3 __ovld __cnfn convert_half3(ulong3); +half3 __ovld __cnfn convert_half3(float3); +half3 __ovld __cnfn convert_half3(half3); +half3 __ovld __cnfn convert_half3_rte(char3); +half3 __ovld __cnfn convert_half3_rte(uchar3); +half3 __ovld __cnfn convert_half3_rte(short3); +half3 __ovld __cnfn convert_half3_rte(ushort3); +half3 __ovld __cnfn convert_half3_rte(int3); +half3 __ovld __cnfn convert_half3_rte(uint3); +half3 __ovld __cnfn convert_half3_rte(long3); +half3 __ovld __cnfn convert_half3_rte(ulong3); +half3 __ovld __cnfn convert_half3_rte(float3); +half3 __ovld __cnfn convert_half3_rte(half3); +half3 __ovld __cnfn convert_half3_rtp(char3); +half3 __ovld __cnfn convert_half3_rtp(uchar3); +half3 __ovld __cnfn convert_half3_rtp(short3); +half3 __ovld __cnfn convert_half3_rtp(ushort3); +half3 __ovld __cnfn convert_half3_rtp(int3); +half3 __ovld __cnfn convert_half3_rtp(uint3); +half3 __ovld __cnfn convert_half3_rtp(long3); +half3 __ovld __cnfn convert_half3_rtp(ulong3); +half3 __ovld __cnfn convert_half3_rtp(float3); +half3 __ovld __cnfn convert_half3_rtp(half3); +half3 __ovld __cnfn convert_half3_rtn(char3); +half3 __ovld __cnfn convert_half3_rtn(uchar3); +half3 __ovld __cnfn convert_half3_rtn(short3); +half3 __ovld __cnfn convert_half3_rtn(ushort3); +half3 
__ovld __cnfn convert_half3_rtn(int3); +half3 __ovld __cnfn convert_half3_rtn(uint3); +half3 __ovld __cnfn convert_half3_rtn(long3); +half3 __ovld __cnfn convert_half3_rtn(ulong3); +half3 __ovld __cnfn convert_half3_rtn(float3); +half3 __ovld __cnfn convert_half3_rtn(half3); +half3 __ovld __cnfn convert_half3_rtz(char3); +half3 __ovld __cnfn convert_half3_rtz(uchar3); +half3 __ovld __cnfn convert_half3_rtz(short3); +half3 __ovld __cnfn convert_half3_rtz(ushort3); +half3 __ovld __cnfn convert_half3_rtz(int3); +half3 __ovld __cnfn convert_half3_rtz(uint3); +half3 __ovld __cnfn convert_half3_rtz(long3); +half3 __ovld __cnfn convert_half3_rtz(ulong3); +half3 __ovld __cnfn convert_half3_rtz(float3); +half3 __ovld __cnfn convert_half3_rtz(half3); +half4 __ovld __cnfn convert_half4(char4); +half4 __ovld __cnfn convert_half4(uchar4); +half4 __ovld __cnfn convert_half4(short4); +half4 __ovld __cnfn convert_half4(ushort4); +half4 __ovld __cnfn convert_half4(int4); +half4 __ovld __cnfn convert_half4(uint4); +half4 __ovld __cnfn convert_half4(long4); +half4 __ovld __cnfn convert_half4(ulong4); +half4 __ovld __cnfn convert_half4(float4); +half4 __ovld __cnfn convert_half4(half4); +half4 __ovld __cnfn convert_half4_rte(char4); +half4 __ovld __cnfn convert_half4_rte(uchar4); +half4 __ovld __cnfn convert_half4_rte(short4); +half4 __ovld __cnfn convert_half4_rte(ushort4); +half4 __ovld __cnfn convert_half4_rte(int4); +half4 __ovld __cnfn convert_half4_rte(uint4); +half4 __ovld __cnfn convert_half4_rte(long4); +half4 __ovld __cnfn convert_half4_rte(ulong4); +half4 __ovld __cnfn convert_half4_rte(float4); +half4 __ovld __cnfn convert_half4_rte(half4); +half4 __ovld __cnfn convert_half4_rtp(char4); +half4 __ovld __cnfn convert_half4_rtp(uchar4); +half4 __ovld __cnfn convert_half4_rtp(short4); +half4 __ovld __cnfn convert_half4_rtp(ushort4); +half4 __ovld __cnfn convert_half4_rtp(int4); +half4 __ovld __cnfn convert_half4_rtp(uint4); +half4 __ovld __cnfn convert_half4_rtp(long4); +half4 __ovld __cnfn convert_half4_rtp(ulong4); +half4 __ovld __cnfn convert_half4_rtp(float4); +half4 __ovld __cnfn convert_half4_rtp(half4); +half4 __ovld __cnfn convert_half4_rtn(char4); +half4 __ovld __cnfn convert_half4_rtn(uchar4); +half4 __ovld __cnfn convert_half4_rtn(short4); +half4 __ovld __cnfn convert_half4_rtn(ushort4); +half4 __ovld __cnfn convert_half4_rtn(int4); +half4 __ovld __cnfn convert_half4_rtn(uint4); +half4 __ovld __cnfn convert_half4_rtn(long4); +half4 __ovld __cnfn convert_half4_rtn(ulong4); +half4 __ovld __cnfn convert_half4_rtn(float4); +half4 __ovld __cnfn convert_half4_rtn(half4); +half4 __ovld __cnfn convert_half4_rtz(char4); +half4 __ovld __cnfn convert_half4_rtz(uchar4); +half4 __ovld __cnfn convert_half4_rtz(short4); +half4 __ovld __cnfn convert_half4_rtz(ushort4); +half4 __ovld __cnfn convert_half4_rtz(int4); +half4 __ovld __cnfn convert_half4_rtz(uint4); +half4 __ovld __cnfn convert_half4_rtz(long4); +half4 __ovld __cnfn convert_half4_rtz(ulong4); +half4 __ovld __cnfn convert_half4_rtz(float4); +half4 __ovld __cnfn convert_half4_rtz(half4); +half8 __ovld __cnfn convert_half8(char8); +half8 __ovld __cnfn convert_half8(uchar8); +half8 __ovld __cnfn convert_half8(short8); +half8 __ovld __cnfn convert_half8(ushort8); +half8 __ovld __cnfn convert_half8(int8); +half8 __ovld __cnfn convert_half8(uint8); +half8 __ovld __cnfn convert_half8(long8); +half8 __ovld __cnfn convert_half8(ulong8); +half8 __ovld __cnfn convert_half8(float8); +half8 __ovld __cnfn convert_half8(half8); +half8 __ovld __cnfn 
convert_half8_rte(char8); +half8 __ovld __cnfn convert_half8_rte(uchar8); +half8 __ovld __cnfn convert_half8_rte(short8); +half8 __ovld __cnfn convert_half8_rte(ushort8); +half8 __ovld __cnfn convert_half8_rte(int8); +half8 __ovld __cnfn convert_half8_rte(uint8); +half8 __ovld __cnfn convert_half8_rte(long8); +half8 __ovld __cnfn convert_half8_rte(ulong8); +half8 __ovld __cnfn convert_half8_rte(float8); +half8 __ovld __cnfn convert_half8_rte(half8); +half8 __ovld __cnfn convert_half8_rtp(char8); +half8 __ovld __cnfn convert_half8_rtp(uchar8); +half8 __ovld __cnfn convert_half8_rtp(short8); +half8 __ovld __cnfn convert_half8_rtp(ushort8); +half8 __ovld __cnfn convert_half8_rtp(int8); +half8 __ovld __cnfn convert_half8_rtp(uint8); +half8 __ovld __cnfn convert_half8_rtp(long8); +half8 __ovld __cnfn convert_half8_rtp(ulong8); +half8 __ovld __cnfn convert_half8_rtp(float8); +half8 __ovld __cnfn convert_half8_rtp(half8); +half8 __ovld __cnfn convert_half8_rtn(char8); +half8 __ovld __cnfn convert_half8_rtn(uchar8); +half8 __ovld __cnfn convert_half8_rtn(short8); +half8 __ovld __cnfn convert_half8_rtn(ushort8); +half8 __ovld __cnfn convert_half8_rtn(int8); +half8 __ovld __cnfn convert_half8_rtn(uint8); +half8 __ovld __cnfn convert_half8_rtn(long8); +half8 __ovld __cnfn convert_half8_rtn(ulong8); +half8 __ovld __cnfn convert_half8_rtn(float8); +half8 __ovld __cnfn convert_half8_rtn(half8); +half8 __ovld __cnfn convert_half8_rtz(char8); +half8 __ovld __cnfn convert_half8_rtz(uchar8); +half8 __ovld __cnfn convert_half8_rtz(short8); +half8 __ovld __cnfn convert_half8_rtz(ushort8); +half8 __ovld __cnfn convert_half8_rtz(int8); +half8 __ovld __cnfn convert_half8_rtz(uint8); +half8 __ovld __cnfn convert_half8_rtz(long8); +half8 __ovld __cnfn convert_half8_rtz(ulong8); +half8 __ovld __cnfn convert_half8_rtz(float8); +half8 __ovld __cnfn convert_half8_rtz(half8); +half16 __ovld __cnfn convert_half16(char16); +half16 __ovld __cnfn convert_half16(uchar16); +half16 __ovld __cnfn convert_half16(short16); +half16 __ovld __cnfn convert_half16(ushort16); +half16 __ovld __cnfn convert_half16(int16); +half16 __ovld __cnfn convert_half16(uint16); +half16 __ovld __cnfn convert_half16(long16); +half16 __ovld __cnfn convert_half16(ulong16); +half16 __ovld __cnfn convert_half16(float16); +half16 __ovld __cnfn convert_half16(half16); +half16 __ovld __cnfn convert_half16_rte(char16); +half16 __ovld __cnfn convert_half16_rte(uchar16); +half16 __ovld __cnfn convert_half16_rte(short16); +half16 __ovld __cnfn convert_half16_rte(ushort16); +half16 __ovld __cnfn convert_half16_rte(int16); +half16 __ovld __cnfn convert_half16_rte(uint16); +half16 __ovld __cnfn convert_half16_rte(long16); +half16 __ovld __cnfn convert_half16_rte(ulong16); +half16 __ovld __cnfn convert_half16_rte(float16); +half16 __ovld __cnfn convert_half16_rte(half16); +half16 __ovld __cnfn convert_half16_rtp(char16); +half16 __ovld __cnfn convert_half16_rtp(uchar16); +half16 __ovld __cnfn convert_half16_rtp(short16); +half16 __ovld __cnfn convert_half16_rtp(ushort16); +half16 __ovld __cnfn convert_half16_rtp(int16); +half16 __ovld __cnfn convert_half16_rtp(uint16); +half16 __ovld __cnfn convert_half16_rtp(long16); +half16 __ovld __cnfn convert_half16_rtp(ulong16); +half16 __ovld __cnfn convert_half16_rtp(float16); +half16 __ovld __cnfn convert_half16_rtp(half16); +half16 __ovld __cnfn convert_half16_rtn(char16); +half16 __ovld __cnfn convert_half16_rtn(uchar16); +half16 __ovld __cnfn convert_half16_rtn(short16); +half16 __ovld __cnfn 
convert_half16_rtn(ushort16); +half16 __ovld __cnfn convert_half16_rtn(int16); +half16 __ovld __cnfn convert_half16_rtn(uint16); +half16 __ovld __cnfn convert_half16_rtn(long16); +half16 __ovld __cnfn convert_half16_rtn(ulong16); +half16 __ovld __cnfn convert_half16_rtn(float16); +half16 __ovld __cnfn convert_half16_rtn(half16); +half16 __ovld __cnfn convert_half16_rtz(char16); +half16 __ovld __cnfn convert_half16_rtz(uchar16); +half16 __ovld __cnfn convert_half16_rtz(short16); +half16 __ovld __cnfn convert_half16_rtz(ushort16); +half16 __ovld __cnfn convert_half16_rtz(int16); +half16 __ovld __cnfn convert_half16_rtz(uint16); +half16 __ovld __cnfn convert_half16_rtz(long16); +half16 __ovld __cnfn convert_half16_rtz(ulong16); +half16 __ovld __cnfn convert_half16_rtz(float16); +half16 __ovld __cnfn convert_half16_rtz(half16); + +// Convert half types to double types. +#ifdef cl_khr_fp64 +double __ovld __cnfn convert_double(half); +double __ovld __cnfn convert_double_rte(half); +double __ovld __cnfn convert_double_rtp(half); +double __ovld __cnfn convert_double_rtn(half); +double __ovld __cnfn convert_double_rtz(half); +double2 __ovld __cnfn convert_double2(half2); +double2 __ovld __cnfn convert_double2_rte(half2); +double2 __ovld __cnfn convert_double2_rtp(half2); +double2 __ovld __cnfn convert_double2_rtn(half2); +double2 __ovld __cnfn convert_double2_rtz(half2); +double3 __ovld __cnfn convert_double3(half3); +double3 __ovld __cnfn convert_double3_rte(half3); +double3 __ovld __cnfn convert_double3_rtp(half3); +double3 __ovld __cnfn convert_double3_rtn(half3); +double3 __ovld __cnfn convert_double3_rtz(half3); +double4 __ovld __cnfn convert_double4(half4); +double4 __ovld __cnfn convert_double4_rte(half4); +double4 __ovld __cnfn convert_double4_rtp(half4); +double4 __ovld __cnfn convert_double4_rtn(half4); +double4 __ovld __cnfn convert_double4_rtz(half4); +double8 __ovld __cnfn convert_double8(half8); +double8 __ovld __cnfn convert_double8_rte(half8); +double8 __ovld __cnfn convert_double8_rtp(half8); +double8 __ovld __cnfn convert_double8_rtn(half8); +double8 __ovld __cnfn convert_double8_rtz(half8); +double16 __ovld __cnfn convert_double16(half16); +double16 __ovld __cnfn convert_double16_rte(half16); +double16 __ovld __cnfn convert_double16_rtp(half16); +double16 __ovld __cnfn convert_double16_rtn(half16); +double16 __ovld __cnfn convert_double16_rtz(half16); + +// Convert double types to half types. 
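As a usage sketch for the conversion overloads declared above (hedged: the kernel and buffer names are hypothetical and this snippet is not part of the patched header; every builtin it calls is declared in the hunk above). The names compose as convert_<destType>[_sat][_rte|_rtz|_rtp|_rtn]: _sat clamps out-of-range results to the destination range, and the optional suffix picks the rounding mode (to nearest even, toward zero, toward +infinity, toward -infinity); with no suffix, conversions to integer types round toward zero and conversions to floating-point types round to nearest even.

#pragma OPENCL EXTENSION cl_khr_fp16 : enable

__kernel void demo_half_convert(__global const half4 *src,
                                __global float4 *dst_f,
                                __global int4 *dst_i,
                                __global half4 *dst_h) {
    size_t gid = get_global_id(0);
    half4 h = src[gid];
    // Widening conversion; without a rounding suffix, floating-point targets
    // use round-to-nearest-even.
    float4 f = convert_float4(h);
    dst_f[gid] = f;
    // Saturating conversion with an explicit rounding mode: values outside the
    // int range clamp to the nearest representable int rather than overflowing.
    dst_i[gid] = convert_int4_sat_rte(h);
    // Narrowing back to half with explicit round-toward-zero.
    dst_h[gid] = convert_half4_rtz(f);
}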
+half __ovld __cnfn convert_half(double); +half __ovld __cnfn convert_half_rte(double); +half __ovld __cnfn convert_half_rtp(double); +half __ovld __cnfn convert_half_rtn(double); +half __ovld __cnfn convert_half_rtz(double); +half2 __ovld __cnfn convert_half2(double2); +half2 __ovld __cnfn convert_half2_rte(double2); +half2 __ovld __cnfn convert_half2_rtp(double2); +half2 __ovld __cnfn convert_half2_rtn(double2); +half2 __ovld __cnfn convert_half2_rtz(double2); +half3 __ovld __cnfn convert_half3(double3); +half3 __ovld __cnfn convert_half3_rte(double3); +half3 __ovld __cnfn convert_half3_rtp(double3); +half3 __ovld __cnfn convert_half3_rtn(double3); +half3 __ovld __cnfn convert_half3_rtz(double3); +half4 __ovld __cnfn convert_half4(double4); +half4 __ovld __cnfn convert_half4_rte(double4); +half4 __ovld __cnfn convert_half4_rtp(double4); +half4 __ovld __cnfn convert_half4_rtn(double4); +half4 __ovld __cnfn convert_half4_rtz(double4); +half8 __ovld __cnfn convert_half8(double8); +half8 __ovld __cnfn convert_half8_rte(double8); +half8 __ovld __cnfn convert_half8_rtp(double8); +half8 __ovld __cnfn convert_half8_rtn(double8); +half8 __ovld __cnfn convert_half8_rtz(double8); +half16 __ovld __cnfn convert_half16(double16); +half16 __ovld __cnfn convert_half16_rte(double16); +half16 __ovld __cnfn convert_half16_rtp(double16); +half16 __ovld __cnfn convert_half16_rtn(double16); +half16 __ovld __cnfn convert_half16_rtz(double16); +#endif //cl_khr_fp64 + +#endif // cl_khr_fp16 + +/** + * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators + * Reinterprets a data type as another data type of the same size + */ +char __ovld __cnfn as_char(char); +char __ovld __cnfn as_char(uchar); + +char2 __ovld __cnfn as_char2(char2); +char2 __ovld __cnfn as_char2(uchar2); +char2 __ovld __cnfn as_char2(short); +char2 __ovld __cnfn as_char2(ushort); + +char3 __ovld __cnfn as_char3(char3); +char3 __ovld __cnfn as_char3(char4); +char3 __ovld __cnfn as_char3(uchar3); +char3 __ovld __cnfn as_char3(uchar4); +char3 __ovld __cnfn as_char3(short2); +char3 __ovld __cnfn as_char3(ushort2); +char3 __ovld __cnfn as_char3(int); +char3 __ovld __cnfn as_char3(uint); +char3 __ovld __cnfn as_char3(float); + +char4 __ovld __cnfn as_char4(char3); +char4 __ovld __cnfn as_char4(char4); +char4 __ovld __cnfn as_char4(uchar3); +char4 __ovld __cnfn as_char4(uchar4); +char4 __ovld __cnfn as_char4(short2); +char4 __ovld __cnfn as_char4(ushort2); +char4 __ovld __cnfn as_char4(int); +char4 __ovld __cnfn as_char4(uint); +char4 __ovld __cnfn as_char4(float); + +char8 __ovld __cnfn as_char8(char8); +char8 __ovld __cnfn as_char8(uchar8); +char8 __ovld __cnfn as_char8(short3); +char8 __ovld __cnfn as_char8(short4); +char8 __ovld __cnfn as_char8(ushort3); +char8 __ovld __cnfn as_char8(ushort4); +char8 __ovld __cnfn as_char8(int2); +char8 __ovld __cnfn as_char8(uint2); +char8 __ovld __cnfn as_char8(long); +char8 __ovld __cnfn as_char8(ulong); +char8 __ovld __cnfn as_char8(float2); + +char16 __ovld __cnfn as_char16(char16); +char16 __ovld __cnfn as_char16(uchar16); +char16 __ovld __cnfn as_char16(short8); +char16 __ovld __cnfn as_char16(ushort8); +char16 __ovld __cnfn as_char16(int3); +char16 __ovld __cnfn as_char16(int4); +char16 __ovld __cnfn as_char16(uint3); +char16 __ovld __cnfn as_char16(uint4); +char16 __ovld __cnfn as_char16(long2); +char16 __ovld __cnfn as_char16(ulong2); +char16 __ovld __cnfn as_char16(float3); +char16 __ovld __cnfn as_char16(float4); + +uchar __ovld __cnfn as_uchar(char); +uchar __ovld __cnfn as_uchar(uchar); + +uchar2 __ovld 
__cnfn as_uchar2(char2); +uchar2 __ovld __cnfn as_uchar2(uchar2); +uchar2 __ovld __cnfn as_uchar2(short); +uchar2 __ovld __cnfn as_uchar2(ushort); + +uchar3 __ovld __cnfn as_uchar3(char3); +uchar3 __ovld __cnfn as_uchar3(char4); +uchar3 __ovld __cnfn as_uchar3(uchar3); +uchar3 __ovld __cnfn as_uchar3(uchar4); +uchar3 __ovld __cnfn as_uchar3(short2); +uchar3 __ovld __cnfn as_uchar3(ushort2); +uchar3 __ovld __cnfn as_uchar3(int); +uchar3 __ovld __cnfn as_uchar3(uint); +uchar3 __ovld __cnfn as_uchar3(float); + +uchar4 __ovld __cnfn as_uchar4(char3); +uchar4 __ovld __cnfn as_uchar4(char4); +uchar4 __ovld __cnfn as_uchar4(uchar3); +uchar4 __ovld __cnfn as_uchar4(uchar4); +uchar4 __ovld __cnfn as_uchar4(short2); +uchar4 __ovld __cnfn as_uchar4(ushort2); +uchar4 __ovld __cnfn as_uchar4(int); +uchar4 __ovld __cnfn as_uchar4(uint); +uchar4 __ovld __cnfn as_uchar4(float); + +uchar8 __ovld __cnfn as_uchar8(char8); +uchar8 __ovld __cnfn as_uchar8(uchar8); +uchar8 __ovld __cnfn as_uchar8(short3); +uchar8 __ovld __cnfn as_uchar8(short4); +uchar8 __ovld __cnfn as_uchar8(ushort3); +uchar8 __ovld __cnfn as_uchar8(ushort4); +uchar8 __ovld __cnfn as_uchar8(int2); +uchar8 __ovld __cnfn as_uchar8(uint2); +uchar8 __ovld __cnfn as_uchar8(long); +uchar8 __ovld __cnfn as_uchar8(ulong); +uchar8 __ovld __cnfn as_uchar8(float2); + +uchar16 __ovld __cnfn as_uchar16(char16); +uchar16 __ovld __cnfn as_uchar16(uchar16); +uchar16 __ovld __cnfn as_uchar16(short8); +uchar16 __ovld __cnfn as_uchar16(ushort8); +uchar16 __ovld __cnfn as_uchar16(int3); +uchar16 __ovld __cnfn as_uchar16(int4); +uchar16 __ovld __cnfn as_uchar16(uint3); +uchar16 __ovld __cnfn as_uchar16(uint4); +uchar16 __ovld __cnfn as_uchar16(long2); +uchar16 __ovld __cnfn as_uchar16(ulong2); +uchar16 __ovld __cnfn as_uchar16(float3); +uchar16 __ovld __cnfn as_uchar16(float4); + +short __ovld __cnfn as_short(char2); +short __ovld __cnfn as_short(uchar2); +short __ovld __cnfn as_short(short); +short __ovld __cnfn as_short(ushort); + +short2 __ovld __cnfn as_short2(char3); +short2 __ovld __cnfn as_short2(char4); +short2 __ovld __cnfn as_short2(uchar3); +short2 __ovld __cnfn as_short2(uchar4); +short2 __ovld __cnfn as_short2(short2); +short2 __ovld __cnfn as_short2(ushort2); +short2 __ovld __cnfn as_short2(int); +short2 __ovld __cnfn as_short2(uint); +short2 __ovld __cnfn as_short2(float); + +short3 __ovld __cnfn as_short3(char8); +short3 __ovld __cnfn as_short3(uchar8); +short3 __ovld __cnfn as_short3(short3); +short3 __ovld __cnfn as_short3(short4); +short3 __ovld __cnfn as_short3(ushort3); +short3 __ovld __cnfn as_short3(ushort4); +short3 __ovld __cnfn as_short3(int2); +short3 __ovld __cnfn as_short3(uint2); +short3 __ovld __cnfn as_short3(long); +short3 __ovld __cnfn as_short3(ulong); +short3 __ovld __cnfn as_short3(float2); + +short4 __ovld __cnfn as_short4(char8); +short4 __ovld __cnfn as_short4(uchar8); +short4 __ovld __cnfn as_short4(short3); +short4 __ovld __cnfn as_short4(short4); +short4 __ovld __cnfn as_short4(ushort3); +short4 __ovld __cnfn as_short4(ushort4); +short4 __ovld __cnfn as_short4(int2); +short4 __ovld __cnfn as_short4(uint2); +short4 __ovld __cnfn as_short4(long); +short4 __ovld __cnfn as_short4(ulong); +short4 __ovld __cnfn as_short4(float2); + +short8 __ovld __cnfn as_short8(char16); +short8 __ovld __cnfn as_short8(uchar16); +short8 __ovld __cnfn as_short8(short8); +short8 __ovld __cnfn as_short8(ushort8); +short8 __ovld __cnfn as_short8(int3); +short8 __ovld __cnfn as_short8(int4); +short8 __ovld __cnfn as_short8(uint3); +short8 __ovld 
__cnfn as_short8(uint4); +short8 __ovld __cnfn as_short8(long2); +short8 __ovld __cnfn as_short8(ulong2); +short8 __ovld __cnfn as_short8(float3); +short8 __ovld __cnfn as_short8(float4); + +short16 __ovld __cnfn as_short16(short16); +short16 __ovld __cnfn as_short16(ushort16); +short16 __ovld __cnfn as_short16(int8); +short16 __ovld __cnfn as_short16(uint8); +short16 __ovld __cnfn as_short16(long3); +short16 __ovld __cnfn as_short16(long4); +short16 __ovld __cnfn as_short16(ulong3); +short16 __ovld __cnfn as_short16(ulong4); +short16 __ovld __cnfn as_short16(float8); + +ushort __ovld __cnfn as_ushort(char2); +ushort __ovld __cnfn as_ushort(uchar2); +ushort __ovld __cnfn as_ushort(short); +ushort __ovld __cnfn as_ushort(ushort); + +ushort2 __ovld __cnfn as_ushort2(char3); +ushort2 __ovld __cnfn as_ushort2(char4); +ushort2 __ovld __cnfn as_ushort2(uchar3); +ushort2 __ovld __cnfn as_ushort2(uchar4); +ushort2 __ovld __cnfn as_ushort2(short2); +ushort2 __ovld __cnfn as_ushort2(ushort2); +ushort2 __ovld __cnfn as_ushort2(int); +ushort2 __ovld __cnfn as_ushort2(uint); +ushort2 __ovld __cnfn as_ushort2(float); + +ushort3 __ovld __cnfn as_ushort3(char8); +ushort3 __ovld __cnfn as_ushort3(uchar8); +ushort3 __ovld __cnfn as_ushort3(short3); +ushort3 __ovld __cnfn as_ushort3(short4); +ushort3 __ovld __cnfn as_ushort3(ushort3); +ushort3 __ovld __cnfn as_ushort3(ushort4); +ushort3 __ovld __cnfn as_ushort3(int2); +ushort3 __ovld __cnfn as_ushort3(uint2); +ushort3 __ovld __cnfn as_ushort3(long); +ushort3 __ovld __cnfn as_ushort3(ulong); +ushort3 __ovld __cnfn as_ushort3(float2); + +ushort4 __ovld __cnfn as_ushort4(char8); +ushort4 __ovld __cnfn as_ushort4(uchar8); +ushort4 __ovld __cnfn as_ushort4(short3); +ushort4 __ovld __cnfn as_ushort4(short4); +ushort4 __ovld __cnfn as_ushort4(ushort3); +ushort4 __ovld __cnfn as_ushort4(ushort4); +ushort4 __ovld __cnfn as_ushort4(int2); +ushort4 __ovld __cnfn as_ushort4(uint2); +ushort4 __ovld __cnfn as_ushort4(long); +ushort4 __ovld __cnfn as_ushort4(ulong); +ushort4 __ovld __cnfn as_ushort4(float2); + +ushort8 __ovld __cnfn as_ushort8(char16); +ushort8 __ovld __cnfn as_ushort8(uchar16); +ushort8 __ovld __cnfn as_ushort8(short8); +ushort8 __ovld __cnfn as_ushort8(ushort8); +ushort8 __ovld __cnfn as_ushort8(int3); +ushort8 __ovld __cnfn as_ushort8(int4); +ushort8 __ovld __cnfn as_ushort8(uint3); +ushort8 __ovld __cnfn as_ushort8(uint4); +ushort8 __ovld __cnfn as_ushort8(long2); +ushort8 __ovld __cnfn as_ushort8(ulong2); +ushort8 __ovld __cnfn as_ushort8(float3); +ushort8 __ovld __cnfn as_ushort8(float4); + +ushort16 __ovld __cnfn as_ushort16(short16); +ushort16 __ovld __cnfn as_ushort16(ushort16); +ushort16 __ovld __cnfn as_ushort16(int8); +ushort16 __ovld __cnfn as_ushort16(uint8); +ushort16 __ovld __cnfn as_ushort16(long3); +ushort16 __ovld __cnfn as_ushort16(long4); +ushort16 __ovld __cnfn as_ushort16(ulong3); +ushort16 __ovld __cnfn as_ushort16(ulong4); +ushort16 __ovld __cnfn as_ushort16(float8); + +int __ovld __cnfn as_int(char3); +int __ovld __cnfn as_int(char4); +int __ovld __cnfn as_int(uchar3); +int __ovld __cnfn as_int(uchar4); +int __ovld __cnfn as_int(short2); +int __ovld __cnfn as_int(ushort2); +int __ovld __cnfn as_int(int); +int __ovld __cnfn as_int(uint); +int __ovld __cnfn as_int(float); + +int2 __ovld __cnfn as_int2(char8); +int2 __ovld __cnfn as_int2(uchar8); +int2 __ovld __cnfn as_int2(short3); +int2 __ovld __cnfn as_int2(short4); +int2 __ovld __cnfn as_int2(ushort3); +int2 __ovld __cnfn as_int2(ushort4); +int2 __ovld __cnfn as_int2(int2); +int2 
__ovld __cnfn as_int2(uint2); +int2 __ovld __cnfn as_int2(long); +int2 __ovld __cnfn as_int2(ulong); +int2 __ovld __cnfn as_int2(float2); + +int3 __ovld __cnfn as_int3(char16); +int3 __ovld __cnfn as_int3(uchar16); +int3 __ovld __cnfn as_int3(short8); +int3 __ovld __cnfn as_int3(ushort8); +int3 __ovld __cnfn as_int3(int3); +int3 __ovld __cnfn as_int3(int4); +int3 __ovld __cnfn as_int3(uint3); +int3 __ovld __cnfn as_int3(uint4); +int3 __ovld __cnfn as_int3(long2); +int3 __ovld __cnfn as_int3(ulong2); +int3 __ovld __cnfn as_int3(float3); +int3 __ovld __cnfn as_int3(float4); + +int4 __ovld __cnfn as_int4(char16); +int4 __ovld __cnfn as_int4(uchar16); +int4 __ovld __cnfn as_int4(short8); +int4 __ovld __cnfn as_int4(ushort8); +int4 __ovld __cnfn as_int4(int3); +int4 __ovld __cnfn as_int4(int4); +int4 __ovld __cnfn as_int4(uint3); +int4 __ovld __cnfn as_int4(uint4); +int4 __ovld __cnfn as_int4(long2); +int4 __ovld __cnfn as_int4(ulong2); +int4 __ovld __cnfn as_int4(float3); +int4 __ovld __cnfn as_int4(float4); + +int8 __ovld __cnfn as_int8(short16); +int8 __ovld __cnfn as_int8(ushort16); +int8 __ovld __cnfn as_int8(int8); +int8 __ovld __cnfn as_int8(uint8); +int8 __ovld __cnfn as_int8(long3); +int8 __ovld __cnfn as_int8(long4); +int8 __ovld __cnfn as_int8(ulong3); +int8 __ovld __cnfn as_int8(ulong4); +int8 __ovld __cnfn as_int8(float8); + +int16 __ovld __cnfn as_int16(int16); +int16 __ovld __cnfn as_int16(uint16); +int16 __ovld __cnfn as_int16(long8); +int16 __ovld __cnfn as_int16(ulong8); +int16 __ovld __cnfn as_int16(float16); + +uint __ovld __cnfn as_uint(char3); +uint __ovld __cnfn as_uint(char4); +uint __ovld __cnfn as_uint(uchar3); +uint __ovld __cnfn as_uint(uchar4); +uint __ovld __cnfn as_uint(short2); +uint __ovld __cnfn as_uint(ushort2); +uint __ovld __cnfn as_uint(int); +uint __ovld __cnfn as_uint(uint); +uint __ovld __cnfn as_uint(float); + +uint2 __ovld __cnfn as_uint2(char8); +uint2 __ovld __cnfn as_uint2(uchar8); +uint2 __ovld __cnfn as_uint2(short3); +uint2 __ovld __cnfn as_uint2(short4); +uint2 __ovld __cnfn as_uint2(ushort3); +uint2 __ovld __cnfn as_uint2(ushort4); +uint2 __ovld __cnfn as_uint2(int2); +uint2 __ovld __cnfn as_uint2(uint2); +uint2 __ovld __cnfn as_uint2(long); +uint2 __ovld __cnfn as_uint2(ulong); +uint2 __ovld __cnfn as_uint2(float2); + +uint3 __ovld __cnfn as_uint3(char16); +uint3 __ovld __cnfn as_uint3(uchar16); +uint3 __ovld __cnfn as_uint3(short8); +uint3 __ovld __cnfn as_uint3(ushort8); +uint3 __ovld __cnfn as_uint3(int3); +uint3 __ovld __cnfn as_uint3(int4); +uint3 __ovld __cnfn as_uint3(uint3); +uint3 __ovld __cnfn as_uint3(uint4); +uint3 __ovld __cnfn as_uint3(long2); +uint3 __ovld __cnfn as_uint3(ulong2); +uint3 __ovld __cnfn as_uint3(float3); +uint3 __ovld __cnfn as_uint3(float4); + +uint4 __ovld __cnfn as_uint4(char16); +uint4 __ovld __cnfn as_uint4(uchar16); +uint4 __ovld __cnfn as_uint4(short8); +uint4 __ovld __cnfn as_uint4(ushort8); +uint4 __ovld __cnfn as_uint4(int3); +uint4 __ovld __cnfn as_uint4(int4); +uint4 __ovld __cnfn as_uint4(uint3); +uint4 __ovld __cnfn as_uint4(uint4); +uint4 __ovld __cnfn as_uint4(long2); +uint4 __ovld __cnfn as_uint4(ulong2); +uint4 __ovld __cnfn as_uint4(float3); +uint4 __ovld __cnfn as_uint4(float4); + +uint8 __ovld __cnfn as_uint8(short16); +uint8 __ovld __cnfn as_uint8(ushort16); +uint8 __ovld __cnfn as_uint8(int8); +uint8 __ovld __cnfn as_uint8(uint8); +uint8 __ovld __cnfn as_uint8(long3); +uint8 __ovld __cnfn as_uint8(long4); +uint8 __ovld __cnfn as_uint8(ulong3); +uint8 __ovld __cnfn as_uint8(ulong4); +uint8 
__ovld __cnfn as_uint8(float8); + +uint16 __ovld __cnfn as_uint16(int16); +uint16 __ovld __cnfn as_uint16(uint16); +uint16 __ovld __cnfn as_uint16(long8); +uint16 __ovld __cnfn as_uint16(ulong8); +uint16 __ovld __cnfn as_uint16(float16); + +long __ovld __cnfn as_long(char8); +long __ovld __cnfn as_long(uchar8); +long __ovld __cnfn as_long(short3); +long __ovld __cnfn as_long(short4); +long __ovld __cnfn as_long(ushort3); +long __ovld __cnfn as_long(ushort4); +long __ovld __cnfn as_long(int2); +long __ovld __cnfn as_long(uint2); +long __ovld __cnfn as_long(long); +long __ovld __cnfn as_long(ulong); +long __ovld __cnfn as_long(float2); + +long2 __ovld __cnfn as_long2(char16); +long2 __ovld __cnfn as_long2(uchar16); +long2 __ovld __cnfn as_long2(short8); +long2 __ovld __cnfn as_long2(ushort8); +long2 __ovld __cnfn as_long2(int3); +long2 __ovld __cnfn as_long2(int4); +long2 __ovld __cnfn as_long2(uint3); +long2 __ovld __cnfn as_long2(uint4); +long2 __ovld __cnfn as_long2(long2); +long2 __ovld __cnfn as_long2(ulong2); +long2 __ovld __cnfn as_long2(float3); +long2 __ovld __cnfn as_long2(float4); + +long3 __ovld __cnfn as_long3(short16); +long3 __ovld __cnfn as_long3(ushort16); +long3 __ovld __cnfn as_long3(int8); +long3 __ovld __cnfn as_long3(uint8); +long3 __ovld __cnfn as_long3(long3); +long3 __ovld __cnfn as_long3(long4); +long3 __ovld __cnfn as_long3(ulong3); +long3 __ovld __cnfn as_long3(ulong4); +long3 __ovld __cnfn as_long3(float8); + +long4 __ovld __cnfn as_long4(short16); +long4 __ovld __cnfn as_long4(ushort16); +long4 __ovld __cnfn as_long4(int8); +long4 __ovld __cnfn as_long4(uint8); +long4 __ovld __cnfn as_long4(long3); +long4 __ovld __cnfn as_long4(long4); +long4 __ovld __cnfn as_long4(ulong3); +long4 __ovld __cnfn as_long4(ulong4); +long4 __ovld __cnfn as_long4(float8); + +long8 __ovld __cnfn as_long8(int16); +long8 __ovld __cnfn as_long8(uint16); +long8 __ovld __cnfn as_long8(long8); +long8 __ovld __cnfn as_long8(ulong8); +long8 __ovld __cnfn as_long8(float16); + +long16 __ovld __cnfn as_long16(long16); +long16 __ovld __cnfn as_long16(ulong16); + +ulong __ovld __cnfn as_ulong(char8); +ulong __ovld __cnfn as_ulong(uchar8); +ulong __ovld __cnfn as_ulong(short3); +ulong __ovld __cnfn as_ulong(short4); +ulong __ovld __cnfn as_ulong(ushort3); +ulong __ovld __cnfn as_ulong(ushort4); +ulong __ovld __cnfn as_ulong(int2); +ulong __ovld __cnfn as_ulong(uint2); +ulong __ovld __cnfn as_ulong(long); +ulong __ovld __cnfn as_ulong(ulong); +ulong __ovld __cnfn as_ulong(float2); + +ulong2 __ovld __cnfn as_ulong2(char16); +ulong2 __ovld __cnfn as_ulong2(uchar16); +ulong2 __ovld __cnfn as_ulong2(short8); +ulong2 __ovld __cnfn as_ulong2(ushort8); +ulong2 __ovld __cnfn as_ulong2(int3); +ulong2 __ovld __cnfn as_ulong2(int4); +ulong2 __ovld __cnfn as_ulong2(uint3); +ulong2 __ovld __cnfn as_ulong2(uint4); +ulong2 __ovld __cnfn as_ulong2(long2); +ulong2 __ovld __cnfn as_ulong2(ulong2); +ulong2 __ovld __cnfn as_ulong2(float3); +ulong2 __ovld __cnfn as_ulong2(float4); + +ulong3 __ovld __cnfn as_ulong3(short16); +ulong3 __ovld __cnfn as_ulong3(ushort16); +ulong3 __ovld __cnfn as_ulong3(int8); +ulong3 __ovld __cnfn as_ulong3(uint8); +ulong3 __ovld __cnfn as_ulong3(long3); +ulong3 __ovld __cnfn as_ulong3(long4); +ulong3 __ovld __cnfn as_ulong3(ulong3); +ulong3 __ovld __cnfn as_ulong3(ulong4); +ulong3 __ovld __cnfn as_ulong3(float8); + +ulong4 __ovld __cnfn as_ulong4(short16); +ulong4 __ovld __cnfn as_ulong4(ushort16); +ulong4 __ovld __cnfn as_ulong4(int8); +ulong4 __ovld __cnfn as_ulong4(uint8); +ulong4 __ovld 
__cnfn as_ulong4(long3); +ulong4 __ovld __cnfn as_ulong4(long4); +ulong4 __ovld __cnfn as_ulong4(ulong3); +ulong4 __ovld __cnfn as_ulong4(ulong4); +ulong4 __ovld __cnfn as_ulong4(float8); + +ulong8 __ovld __cnfn as_ulong8(int16); +ulong8 __ovld __cnfn as_ulong8(uint16); +ulong8 __ovld __cnfn as_ulong8(long8); +ulong8 __ovld __cnfn as_ulong8(ulong8); +ulong8 __ovld __cnfn as_ulong8(float16); + +ulong16 __ovld __cnfn as_ulong16(long16); +ulong16 __ovld __cnfn as_ulong16(ulong16); + +float __ovld __cnfn as_float(char3); +float __ovld __cnfn as_float(char4); +float __ovld __cnfn as_float(uchar3); +float __ovld __cnfn as_float(uchar4); +float __ovld __cnfn as_float(short2); +float __ovld __cnfn as_float(ushort2); +float __ovld __cnfn as_float(int); +float __ovld __cnfn as_float(uint); +float __ovld __cnfn as_float(float); + +float2 __ovld __cnfn as_float2(char8); +float2 __ovld __cnfn as_float2(uchar8); +float2 __ovld __cnfn as_float2(short3); +float2 __ovld __cnfn as_float2(short4); +float2 __ovld __cnfn as_float2(ushort3); +float2 __ovld __cnfn as_float2(ushort4); +float2 __ovld __cnfn as_float2(int2); +float2 __ovld __cnfn as_float2(uint2); +float2 __ovld __cnfn as_float2(long); +float2 __ovld __cnfn as_float2(ulong); +float2 __ovld __cnfn as_float2(float2); + +float3 __ovld __cnfn as_float3(char16); +float3 __ovld __cnfn as_float3(uchar16); +float3 __ovld __cnfn as_float3(short8); +float3 __ovld __cnfn as_float3(ushort8); +float3 __ovld __cnfn as_float3(int3); +float3 __ovld __cnfn as_float3(int4); +float3 __ovld __cnfn as_float3(uint3); +float3 __ovld __cnfn as_float3(uint4); +float3 __ovld __cnfn as_float3(long2); +float3 __ovld __cnfn as_float3(ulong2); +float3 __ovld __cnfn as_float3(float3); +float3 __ovld __cnfn as_float3(float4); + +float4 __ovld __cnfn as_float4(char16); +float4 __ovld __cnfn as_float4(uchar16); +float4 __ovld __cnfn as_float4(short8); +float4 __ovld __cnfn as_float4(ushort8); +float4 __ovld __cnfn as_float4(int3); +float4 __ovld __cnfn as_float4(int4); +float4 __ovld __cnfn as_float4(uint3); +float4 __ovld __cnfn as_float4(uint4); +float4 __ovld __cnfn as_float4(long2); +float4 __ovld __cnfn as_float4(ulong2); +float4 __ovld __cnfn as_float4(float3); +float4 __ovld __cnfn as_float4(float4); + +float8 __ovld __cnfn as_float8(short16); +float8 __ovld __cnfn as_float8(ushort16); +float8 __ovld __cnfn as_float8(int8); +float8 __ovld __cnfn as_float8(uint8); +float8 __ovld __cnfn as_float8(long3); +float8 __ovld __cnfn as_float8(long4); +float8 __ovld __cnfn as_float8(ulong3); +float8 __ovld __cnfn as_float8(ulong4); +float8 __ovld __cnfn as_float8(float8); + +float16 __ovld __cnfn as_float16(int16); +float16 __ovld __cnfn as_float16(uint16); +float16 __ovld __cnfn as_float16(long8); +float16 __ovld __cnfn as_float16(ulong8); +float16 __ovld __cnfn as_float16(float16); + +#ifdef cl_khr_fp64 +char8 __ovld __cnfn as_char8(double); +char16 __ovld __cnfn as_char16(double2); +uchar8 __ovld __cnfn as_uchar8(double); +uchar16 __ovld __cnfn as_uchar16(double2); +short3 __ovld __cnfn as_short3(double); +short4 __ovld __cnfn as_short4(double); +short8 __ovld __cnfn as_short8(double2); +short16 __ovld __cnfn as_short16(double3); +short16 __ovld __cnfn as_short16(double4); +ushort3 __ovld __cnfn as_ushort3(double); +ushort4 __ovld __cnfn as_ushort4(double); +ushort8 __ovld __cnfn as_ushort8(double2); +ushort16 __ovld __cnfn as_ushort16(double3); +ushort16 __ovld __cnfn as_ushort16(double4); +int2 __ovld __cnfn as_int2(double); +int3 __ovld __cnfn as_int3(double2); +int4 __ovld 
__cnfn as_int4(double2); +int8 __ovld __cnfn as_int8(double3); +int8 __ovld __cnfn as_int8(double4); +int16 __ovld __cnfn as_int16(double8); +uint2 __ovld __cnfn as_uint2(double); +uint3 __ovld __cnfn as_uint3(double2); +uint4 __ovld __cnfn as_uint4(double2); +uint8 __ovld __cnfn as_uint8(double3); +uint8 __ovld __cnfn as_uint8(double4); +uint16 __ovld __cnfn as_uint16(double8); +long __ovld __cnfn as_long(double); +long2 __ovld __cnfn as_long2(double2); +long3 __ovld __cnfn as_long3(double3); +long3 __ovld __cnfn as_long3(double4); +long4 __ovld __cnfn as_long4(double3); +long4 __ovld __cnfn as_long4(double4); +long8 __ovld __cnfn as_long8(double8); +long16 __ovld __cnfn as_long16(double16); +ulong __ovld __cnfn as_ulong(double); +ulong2 __ovld __cnfn as_ulong2(double2); +ulong3 __ovld __cnfn as_ulong3(double3); +ulong3 __ovld __cnfn as_ulong3(double4); +ulong4 __ovld __cnfn as_ulong4(double3); +ulong4 __ovld __cnfn as_ulong4(double4); +ulong8 __ovld __cnfn as_ulong8(double8); +ulong16 __ovld __cnfn as_ulong16(double16); +float2 __ovld __cnfn as_float2(double); +float3 __ovld __cnfn as_float3(double2); +float4 __ovld __cnfn as_float4(double2); +float8 __ovld __cnfn as_float8(double3); +float8 __ovld __cnfn as_float8(double4); +float16 __ovld __cnfn as_float16(double8); +double __ovld __cnfn as_double(char8); +double __ovld __cnfn as_double(uchar8); +double __ovld __cnfn as_double(short3); +double __ovld __cnfn as_double(short4); +double __ovld __cnfn as_double(ushort3); +double __ovld __cnfn as_double(ushort4); +double __ovld __cnfn as_double(int2); +double __ovld __cnfn as_double(uint2); +double __ovld __cnfn as_double(long); +double __ovld __cnfn as_double(ulong); +double __ovld __cnfn as_double(float2); +double __ovld __cnfn as_double(double); +double2 __ovld __cnfn as_double2(char16); +double2 __ovld __cnfn as_double2(uchar16); +double2 __ovld __cnfn as_double2(short8); +double2 __ovld __cnfn as_double2(ushort8); +double2 __ovld __cnfn as_double2(int3); +double2 __ovld __cnfn as_double2(int4); +double2 __ovld __cnfn as_double2(uint3); +double2 __ovld __cnfn as_double2(uint4); +double2 __ovld __cnfn as_double2(long2); +double2 __ovld __cnfn as_double2(ulong2); +double2 __ovld __cnfn as_double2(float3); +double2 __ovld __cnfn as_double2(float4); +double2 __ovld __cnfn as_double2(double2); +double3 __ovld __cnfn as_double3(short16); +double3 __ovld __cnfn as_double3(ushort16); +double3 __ovld __cnfn as_double3(int8); +double3 __ovld __cnfn as_double3(uint8); +double3 __ovld __cnfn as_double3(long3); +double3 __ovld __cnfn as_double3(long4); +double3 __ovld __cnfn as_double3(ulong3); +double3 __ovld __cnfn as_double3(ulong4); +double3 __ovld __cnfn as_double3(float8); +double3 __ovld __cnfn as_double3(double3); +double3 __ovld __cnfn as_double3(double4); +double4 __ovld __cnfn as_double4(short16); +double4 __ovld __cnfn as_double4(ushort16); +double4 __ovld __cnfn as_double4(int8); +double4 __ovld __cnfn as_double4(uint8); +double4 __ovld __cnfn as_double4(long3); +double4 __ovld __cnfn as_double4(long4); +double4 __ovld __cnfn as_double4(ulong3); +double4 __ovld __cnfn as_double4(ulong4); +double4 __ovld __cnfn as_double4(float8); +double4 __ovld __cnfn as_double4(double3); +double4 __ovld __cnfn as_double4(double4); +double8 __ovld __cnfn as_double8(int16); +double8 __ovld __cnfn as_double8(uint16); +double8 __ovld __cnfn as_double8(long8); +double8 __ovld __cnfn as_double8(ulong8); +double8 __ovld __cnfn as_double8(float16); +double8 __ovld __cnfn as_double8(double8); +double16 __ovld 
__cnfn as_double16(long16); +double16 __ovld __cnfn as_double16(ulong16); +double16 __ovld __cnfn as_double16(double16); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +char2 __ovld __cnfn as_char2(half); +char3 __ovld __cnfn as_char3(half2); +char4 __ovld __cnfn as_char4(half2); +char8 __ovld __cnfn as_char8(half3); +char8 __ovld __cnfn as_char8(half4); +char16 __ovld __cnfn as_char16(half8); +uchar2 __ovld __cnfn as_uchar2(half); +uchar3 __ovld __cnfn as_uchar3(half2); +uchar4 __ovld __cnfn as_uchar4(half2); +uchar8 __ovld __cnfn as_uchar8(half3); +uchar8 __ovld __cnfn as_uchar8(half4); +uchar16 __ovld __cnfn as_uchar16(half8); +short __ovld __cnfn as_short(half); +short2 __ovld __cnfn as_short2(half2); +short3 __ovld __cnfn as_short3(half3); +short3 __ovld __cnfn as_short3(half4); +short4 __ovld __cnfn as_short4(half3); +short4 __ovld __cnfn as_short4(half4); +short8 __ovld __cnfn as_short8(half8); +short16 __ovld __cnfn as_short16(half16); +ushort __ovld __cnfn as_ushort(half); +ushort2 __ovld __cnfn as_ushort2(half2); +ushort3 __ovld __cnfn as_ushort3(half3); +ushort3 __ovld __cnfn as_ushort3(half4); +ushort4 __ovld __cnfn as_ushort4(half3); +ushort4 __ovld __cnfn as_ushort4(half4); +ushort8 __ovld __cnfn as_ushort8(half8); +ushort16 __ovld __cnfn as_ushort16(half16); +int __ovld __cnfn as_int(half2); +int2 __ovld __cnfn as_int2(half3); +int2 __ovld __cnfn as_int2(half4); +int3 __ovld __cnfn as_int3(half8); +int4 __ovld __cnfn as_int4(half8); +int8 __ovld __cnfn as_int8(half16); +uint __ovld __cnfn as_uint(half2); +uint2 __ovld __cnfn as_uint2(half3); +uint2 __ovld __cnfn as_uint2(half4); +uint3 __ovld __cnfn as_uint3(half8); +uint4 __ovld __cnfn as_uint4(half8); +uint8 __ovld __cnfn as_uint8(half16); +long __ovld __cnfn as_long(half3); +long __ovld __cnfn as_long(half4); +long2 __ovld __cnfn as_long2(half8); +long3 __ovld __cnfn as_long3(half16); +long4 __ovld __cnfn as_long4(half16); +ulong __ovld __cnfn as_ulong(half3); +ulong __ovld __cnfn as_ulong(half4); +ulong2 __ovld __cnfn as_ulong2(half8); +ulong3 __ovld __cnfn as_ulong3(half16); +ulong4 __ovld __cnfn as_ulong4(half16); +half __ovld __cnfn as_half(char2); +half __ovld __cnfn as_half(uchar2); +half __ovld __cnfn as_half(short); +half __ovld __cnfn as_half(ushort); +half __ovld __cnfn as_half(half); +half2 __ovld __cnfn as_half2(char3); +half2 __ovld __cnfn as_half2(char4); +half2 __ovld __cnfn as_half2(uchar3); +half2 __ovld __cnfn as_half2(uchar4); +half2 __ovld __cnfn as_half2(short2); +half2 __ovld __cnfn as_half2(ushort2); +half2 __ovld __cnfn as_half2(int); +half2 __ovld __cnfn as_half2(uint); +half2 __ovld __cnfn as_half2(half2); +half2 __ovld __cnfn as_half2(float); +half3 __ovld __cnfn as_half3(char8); +half3 __ovld __cnfn as_half3(uchar8); +half3 __ovld __cnfn as_half3(short3); +half3 __ovld __cnfn as_half3(short4); +half3 __ovld __cnfn as_half3(ushort3); +half3 __ovld __cnfn as_half3(ushort4); +half3 __ovld __cnfn as_half3(int2); +half3 __ovld __cnfn as_half3(uint2); +half3 __ovld __cnfn as_half3(long); +half3 __ovld __cnfn as_half3(ulong); +half3 __ovld __cnfn as_half3(half3); +half3 __ovld __cnfn as_half3(half4); +half3 __ovld __cnfn as_half3(float2); +half4 __ovld __cnfn as_half4(char8); +half4 __ovld __cnfn as_half4(uchar8); +half4 __ovld __cnfn as_half4(short3); +half4 __ovld __cnfn as_half4(short4); +half4 __ovld __cnfn as_half4(ushort3); +half4 __ovld __cnfn as_half4(ushort4); +half4 __ovld __cnfn as_half4(int2); +half4 __ovld __cnfn as_half4(uint2); +half4 __ovld __cnfn as_half4(long); +half4 __ovld __cnfn 
as_half4(ulong); +half4 __ovld __cnfn as_half4(half3); +half4 __ovld __cnfn as_half4(half4); +half4 __ovld __cnfn as_half4(float2); +half8 __ovld __cnfn as_half8(char16); +half8 __ovld __cnfn as_half8(uchar16); +half8 __ovld __cnfn as_half8(short8); +half8 __ovld __cnfn as_half8(ushort8); +half8 __ovld __cnfn as_half8(int3); +half8 __ovld __cnfn as_half8(int4); +half8 __ovld __cnfn as_half8(uint3); +half8 __ovld __cnfn as_half8(uint4); +half8 __ovld __cnfn as_half8(long2); +half8 __ovld __cnfn as_half8(ulong2); +half8 __ovld __cnfn as_half8(half8); +half8 __ovld __cnfn as_half8(float3); +half8 __ovld __cnfn as_half8(float4); +half16 __ovld __cnfn as_half16(short16); +half16 __ovld __cnfn as_half16(ushort16); +half16 __ovld __cnfn as_half16(int8); +half16 __ovld __cnfn as_half16(uint8); +half16 __ovld __cnfn as_half16(long3); +half16 __ovld __cnfn as_half16(long4); +half16 __ovld __cnfn as_half16(ulong3); +half16 __ovld __cnfn as_half16(ulong4); +half16 __ovld __cnfn as_half16(half16); +half16 __ovld __cnfn as_half16(float8); +float __ovld __cnfn as_float(half2); +float2 __ovld __cnfn as_float2(half3); +float2 __ovld __cnfn as_float2(half4); +float3 __ovld __cnfn as_float3(half8); +float4 __ovld __cnfn as_float4(half8); +float8 __ovld __cnfn as_float8(half16); + +#ifdef cl_khr_fp64 +half3 __ovld __cnfn as_half3(double); +half4 __ovld __cnfn as_half4(double); +half8 __ovld __cnfn as_half8(double2); +half16 __ovld __cnfn as_half16(double3); +half16 __ovld __cnfn as_half16(double4); +double __ovld __cnfn as_double(half3); +double __ovld __cnfn as_double(half4); +double2 __ovld __cnfn as_double2(half8); +double3 __ovld __cnfn as_double3(half16); +double4 __ovld __cnfn as_double4(half16); +#endif //cl_khr_fp64 +#endif //cl_khr_fp16 + +// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers + +#define __kernel_exec(X, typen) __kernel \ + __attribute__((work_group_size_hint(X, 1, 1))) \ + __attribute__((vec_type_hint(typen))) + +#define kernel_exec(X, typen) __kernel \ + __attribute__((work_group_size_hint(X, 1, 1))) \ + __attribute__((vec_type_hint(typen))) + +// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions + +/** + * Returns the number of dimensions in use. This is the + * value given to the work_dim argument specified in + * clEnqueueNDRangeKernel. + * For clEnqueueTask, this returns 1. + */ +uint __ovld __cnfn get_work_dim(void); + +/** + * Returns the number of global work-items specified for + * dimension identified by dimindx. This value is given by + * the global_work_size argument to + * clEnqueueNDRangeKernel. Valid values of dimindx + * are 0 to get_work_dim() - 1. For other values of + * dimindx, get_global_size() returns 1. + * For clEnqueueTask, this always returns 1. + */ +size_t __ovld __cnfn get_global_size(uint dimindx); + +/** + * Returns the unique global work-item ID value for + * dimension identified by dimindx. The global work-item + * ID specifies the work-item ID based on the number of + * global work-items specified to execute the kernel. Valid + * values of dimindx are 0 to get_work_dim() - 1. For + * other values of dimindx, get_global_id() returns 0. + * For clEnqueueTask, this returns 0. + */ +size_t __ovld __cnfn get_global_id(uint dimindx); + +/** + * Returns the number of local work-items specified in + * dimension identified by dimindx. 
This value is given by + * the local_work_size argument to + * clEnqueueNDRangeKernel if local_work_size is not + * NULL; otherwise the OpenCL implementation chooses + * an appropriate local_work_size value which is returned + * by this function. Valid values of dimindx are 0 to + * get_work_dim() - 1. For other values of dimindx, + * get_local_size() returns 1. + * For clEnqueueTask, this always returns 1. + */ +size_t __ovld __cnfn get_local_size(uint dimindx); + +/** + * Returns the unique local work-item ID i.e. a work-item + * within a specific work-group for dimension identified by + * dimindx. Valid values of dimindx are 0 to + * get_work_dim() - 1. For other values of dimindx, + * get_local_id() returns 0. + * For clEnqueueTask, this returns 0. + */ +size_t __ovld __cnfn get_local_id(uint dimindx); + +/** + * Returns the number of work-groups that will execute a + * kernel for dimension identified by dimindx. + * Valid values of dimindx are 0 to get_work_dim() - 1. + * For other values of dimindx, get_num_groups () returns + * 1. + * For clEnqueueTask, this always returns 1. + */ +size_t __ovld __cnfn get_num_groups(uint dimindx); + +/** + * get_group_id returns the work-group ID which is a + * number from 0 .. get_num_groups(dimindx) - 1. + * Valid values of dimindx are 0 to get_work_dim() - 1. + * For other values, get_group_id() returns 0. + * For clEnqueueTask, this returns 0. + */ +size_t __ovld __cnfn get_group_id(uint dimindx); + +/** + * get_global_offset returns the offset values specified in + * global_work_offset argument to + * clEnqueueNDRangeKernel. + * Valid values of dimindx are 0 to get_work_dim() - 1. + * For other values, get_global_offset() returns 0. + * For clEnqueueTask, this returns 0. + */ +size_t __ovld __cnfn get_global_offset(uint dimindx); + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +size_t __ovld get_enqueued_local_size(uint dimindx); +size_t __ovld get_global_linear_id(void); +size_t __ovld get_local_linear_id(void); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions + +/** + * Arc cosine function. + */ +float __ovld __cnfn acos(float); +float2 __ovld __cnfn acos(float2); +float3 __ovld __cnfn acos(float3); +float4 __ovld __cnfn acos(float4); +float8 __ovld __cnfn acos(float8); +float16 __ovld __cnfn acos(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn acos(double); +double2 __ovld __cnfn acos(double2); +double3 __ovld __cnfn acos(double3); +double4 __ovld __cnfn acos(double4); +double8 __ovld __cnfn acos(double8); +double16 __ovld __cnfn acos(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn acos(half); +half2 __ovld __cnfn acos(half2); +half3 __ovld __cnfn acos(half3); +half4 __ovld __cnfn acos(half4); +half8 __ovld __cnfn acos(half8); +half16 __ovld __cnfn acos(half16); +#endif //cl_khr_fp16 + +/** + * Inverse hyperbolic cosine. 
+ */ +float __ovld __cnfn acosh(float); +float2 __ovld __cnfn acosh(float2); +float3 __ovld __cnfn acosh(float3); +float4 __ovld __cnfn acosh(float4); +float8 __ovld __cnfn acosh(float8); +float16 __ovld __cnfn acosh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn acosh(double); +double2 __ovld __cnfn acosh(double2); +double3 __ovld __cnfn acosh(double3); +double4 __ovld __cnfn acosh(double4); +double8 __ovld __cnfn acosh(double8); +double16 __ovld __cnfn acosh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn acosh(half); +half2 __ovld __cnfn acosh(half2); +half3 __ovld __cnfn acosh(half3); +half4 __ovld __cnfn acosh(half4); +half8 __ovld __cnfn acosh(half8); +half16 __ovld __cnfn acosh(half16); +#endif //cl_khr_fp16 + +/** + * Compute acos (x) / PI. + */ +float __ovld __cnfn acospi(float x); +float2 __ovld __cnfn acospi(float2 x); +float3 __ovld __cnfn acospi(float3 x); +float4 __ovld __cnfn acospi(float4 x); +float8 __ovld __cnfn acospi(float8 x); +float16 __ovld __cnfn acospi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn acospi(double x); +double2 __ovld __cnfn acospi(double2 x); +double3 __ovld __cnfn acospi(double3 x); +double4 __ovld __cnfn acospi(double4 x); +double8 __ovld __cnfn acospi(double8 x); +double16 __ovld __cnfn acospi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn acospi(half x); +half2 __ovld __cnfn acospi(half2 x); +half3 __ovld __cnfn acospi(half3 x); +half4 __ovld __cnfn acospi(half4 x); +half8 __ovld __cnfn acospi(half8 x); +half16 __ovld __cnfn acospi(half16 x); +#endif //cl_khr_fp16 + +/** + * Arc sine function. + */ +float __ovld __cnfn asin(float); +float2 __ovld __cnfn asin(float2); +float3 __ovld __cnfn asin(float3); +float4 __ovld __cnfn asin(float4); +float8 __ovld __cnfn asin(float8); +float16 __ovld __cnfn asin(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn asin(double); +double2 __ovld __cnfn asin(double2); +double3 __ovld __cnfn asin(double3); +double4 __ovld __cnfn asin(double4); +double8 __ovld __cnfn asin(double8); +double16 __ovld __cnfn asin(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn asin(half); +half2 __ovld __cnfn asin(half2); +half3 __ovld __cnfn asin(half3); +half4 __ovld __cnfn asin(half4); +half8 __ovld __cnfn asin(half8); +half16 __ovld __cnfn asin(half16); +#endif //cl_khr_fp16 + +/** + * Inverse hyperbolic sine. + */ +float __ovld __cnfn asinh(float); +float2 __ovld __cnfn asinh(float2); +float3 __ovld __cnfn asinh(float3); +float4 __ovld __cnfn asinh(float4); +float8 __ovld __cnfn asinh(float8); +float16 __ovld __cnfn asinh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn asinh(double); +double2 __ovld __cnfn asinh(double2); +double3 __ovld __cnfn asinh(double3); +double4 __ovld __cnfn asinh(double4); +double8 __ovld __cnfn asinh(double8); +double16 __ovld __cnfn asinh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn asinh(half); +half2 __ovld __cnfn asinh(half2); +half3 __ovld __cnfn asinh(half3); +half4 __ovld __cnfn asinh(half4); +half8 __ovld __cnfn asinh(half8); +half16 __ovld __cnfn asinh(half16); +#endif //cl_khr_fp16 + +/** + * Compute asin (x) / PI. 
+ */ +float __ovld __cnfn asinpi(float x); +float2 __ovld __cnfn asinpi(float2 x); +float3 __ovld __cnfn asinpi(float3 x); +float4 __ovld __cnfn asinpi(float4 x); +float8 __ovld __cnfn asinpi(float8 x); +float16 __ovld __cnfn asinpi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn asinpi(double x); +double2 __ovld __cnfn asinpi(double2 x); +double3 __ovld __cnfn asinpi(double3 x); +double4 __ovld __cnfn asinpi(double4 x); +double8 __ovld __cnfn asinpi(double8 x); +double16 __ovld __cnfn asinpi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn asinpi(half x); +half2 __ovld __cnfn asinpi(half2 x); +half3 __ovld __cnfn asinpi(half3 x); +half4 __ovld __cnfn asinpi(half4 x); +half8 __ovld __cnfn asinpi(half8 x); +half16 __ovld __cnfn asinpi(half16 x); +#endif //cl_khr_fp16 + +/** + * Arc tangent function. + */ +float __ovld __cnfn atan(float y_over_x); +float2 __ovld __cnfn atan(float2 y_over_x); +float3 __ovld __cnfn atan(float3 y_over_x); +float4 __ovld __cnfn atan(float4 y_over_x); +float8 __ovld __cnfn atan(float8 y_over_x); +float16 __ovld __cnfn atan(float16 y_over_x); +#ifdef cl_khr_fp64 +double __ovld __cnfn atan(double y_over_x); +double2 __ovld __cnfn atan(double2 y_over_x); +double3 __ovld __cnfn atan(double3 y_over_x); +double4 __ovld __cnfn atan(double4 y_over_x); +double8 __ovld __cnfn atan(double8 y_over_x); +double16 __ovld __cnfn atan(double16 y_over_x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atan(half y_over_x); +half2 __ovld __cnfn atan(half2 y_over_x); +half3 __ovld __cnfn atan(half3 y_over_x); +half4 __ovld __cnfn atan(half4 y_over_x); +half8 __ovld __cnfn atan(half8 y_over_x); +half16 __ovld __cnfn atan(half16 y_over_x); +#endif //cl_khr_fp16 + +/** + * Arc tangent of y / x. + */ +float __ovld __cnfn atan2(float y, float x); +float2 __ovld __cnfn atan2(float2 y, float2 x); +float3 __ovld __cnfn atan2(float3 y, float3 x); +float4 __ovld __cnfn atan2(float4 y, float4 x); +float8 __ovld __cnfn atan2(float8 y, float8 x); +float16 __ovld __cnfn atan2(float16 y, float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn atan2(double y, double x); +double2 __ovld __cnfn atan2(double2 y, double2 x); +double3 __ovld __cnfn atan2(double3 y, double3 x); +double4 __ovld __cnfn atan2(double4 y, double4 x); +double8 __ovld __cnfn atan2(double8 y, double8 x); +double16 __ovld __cnfn atan2(double16 y, double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atan2(half y, half x); +half2 __ovld __cnfn atan2(half2 y, half2 x); +half3 __ovld __cnfn atan2(half3 y, half3 x); +half4 __ovld __cnfn atan2(half4 y, half4 x); +half8 __ovld __cnfn atan2(half8 y, half8 x); +half16 __ovld __cnfn atan2(half16 y, half16 x); +#endif //cl_khr_fp16 + +/** + * Hyperbolic arc tangent. 
+ */ +float __ovld __cnfn atanh(float); +float2 __ovld __cnfn atanh(float2); +float3 __ovld __cnfn atanh(float3); +float4 __ovld __cnfn atanh(float4); +float8 __ovld __cnfn atanh(float8); +float16 __ovld __cnfn atanh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn atanh(double); +double2 __ovld __cnfn atanh(double2); +double3 __ovld __cnfn atanh(double3); +double4 __ovld __cnfn atanh(double4); +double8 __ovld __cnfn atanh(double8); +double16 __ovld __cnfn atanh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atanh(half); +half2 __ovld __cnfn atanh(half2); +half3 __ovld __cnfn atanh(half3); +half4 __ovld __cnfn atanh(half4); +half8 __ovld __cnfn atanh(half8); +half16 __ovld __cnfn atanh(half16); +#endif //cl_khr_fp16 + +/** + * Compute atan (x) / PI. + */ +float __ovld __cnfn atanpi(float x); +float2 __ovld __cnfn atanpi(float2 x); +float3 __ovld __cnfn atanpi(float3 x); +float4 __ovld __cnfn atanpi(float4 x); +float8 __ovld __cnfn atanpi(float8 x); +float16 __ovld __cnfn atanpi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn atanpi(double x); +double2 __ovld __cnfn atanpi(double2 x); +double3 __ovld __cnfn atanpi(double3 x); +double4 __ovld __cnfn atanpi(double4 x); +double8 __ovld __cnfn atanpi(double8 x); +double16 __ovld __cnfn atanpi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atanpi(half x); +half2 __ovld __cnfn atanpi(half2 x); +half3 __ovld __cnfn atanpi(half3 x); +half4 __ovld __cnfn atanpi(half4 x); +half8 __ovld __cnfn atanpi(half8 x); +half16 __ovld __cnfn atanpi(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute atan2 (y, x) / PI. + */ +float __ovld __cnfn atan2pi(float y, float x); +float2 __ovld __cnfn atan2pi(float2 y, float2 x); +float3 __ovld __cnfn atan2pi(float3 y, float3 x); +float4 __ovld __cnfn atan2pi(float4 y, float4 x); +float8 __ovld __cnfn atan2pi(float8 y, float8 x); +float16 __ovld __cnfn atan2pi(float16 y, float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn atan2pi(double y, double x); +double2 __ovld __cnfn atan2pi(double2 y, double2 x); +double3 __ovld __cnfn atan2pi(double3 y, double3 x); +double4 __ovld __cnfn atan2pi(double4 y, double4 x); +double8 __ovld __cnfn atan2pi(double8 y, double8 x); +double16 __ovld __cnfn atan2pi(double16 y, double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn atan2pi(half y, half x); +half2 __ovld __cnfn atan2pi(half2 y, half2 x); +half3 __ovld __cnfn atan2pi(half3 y, half3 x); +half4 __ovld __cnfn atan2pi(half4 y, half4 x); +half8 __ovld __cnfn atan2pi(half8 y, half8 x); +half16 __ovld __cnfn atan2pi(half16 y, half16 x); +#endif //cl_khr_fp16 + +/** + * Compute cube-root. + */ +float __ovld __cnfn cbrt(float); +float2 __ovld __cnfn cbrt(float2); +float3 __ovld __cnfn cbrt(float3); +float4 __ovld __cnfn cbrt(float4); +float8 __ovld __cnfn cbrt(float8); +float16 __ovld __cnfn cbrt(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn cbrt(double); +double2 __ovld __cnfn cbrt(double2); +double3 __ovld __cnfn cbrt(double3); +double4 __ovld __cnfn cbrt(double4); +double8 __ovld __cnfn cbrt(double8); +double16 __ovld __cnfn cbrt(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn cbrt(half); +half2 __ovld __cnfn cbrt(half2); +half3 __ovld __cnfn cbrt(half3); +half4 __ovld __cnfn cbrt(half4); +half8 __ovld __cnfn cbrt(half8); +half16 __ovld __cnfn cbrt(half16); +#endif //cl_khr_fp16 + +/** + * Round to integral value using the round to positive + * infinity rounding mode. 
+ */ +float __ovld __cnfn ceil(float); +float2 __ovld __cnfn ceil(float2); +float3 __ovld __cnfn ceil(float3); +float4 __ovld __cnfn ceil(float4); +float8 __ovld __cnfn ceil(float8); +float16 __ovld __cnfn ceil(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn ceil(double); +double2 __ovld __cnfn ceil(double2); +double3 __ovld __cnfn ceil(double3); +double4 __ovld __cnfn ceil(double4); +double8 __ovld __cnfn ceil(double8); +double16 __ovld __cnfn ceil(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn ceil(half); +half2 __ovld __cnfn ceil(half2); +half3 __ovld __cnfn ceil(half3); +half4 __ovld __cnfn ceil(half4); +half8 __ovld __cnfn ceil(half8); +half16 __ovld __cnfn ceil(half16); +#endif //cl_khr_fp16 + +/** + * Returns x with its sign changed to match the sign of y. + */ +float __ovld __cnfn copysign(float x, float y); +float2 __ovld __cnfn copysign(float2 x, float2 y); +float3 __ovld __cnfn copysign(float3 x, float3 y); +float4 __ovld __cnfn copysign(float4 x, float4 y); +float8 __ovld __cnfn copysign(float8 x, float8 y); +float16 __ovld __cnfn copysign(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn copysign(double x, double y); +double2 __ovld __cnfn copysign(double2 x, double2 y); +double3 __ovld __cnfn copysign(double3 x, double3 y); +double4 __ovld __cnfn copysign(double4 x, double4 y); +double8 __ovld __cnfn copysign(double8 x, double8 y); +double16 __ovld __cnfn copysign(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn copysign(half x, half y); +half2 __ovld __cnfn copysign(half2 x, half2 y); +half3 __ovld __cnfn copysign(half3 x, half3 y); +half4 __ovld __cnfn copysign(half4 x, half4 y); +half8 __ovld __cnfn copysign(half8 x, half8 y); +half16 __ovld __cnfn copysign(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Compute cosine. + */ +float __ovld __cnfn cos(float); +float2 __ovld __cnfn cos(float2); +float3 __ovld __cnfn cos(float3); +float4 __ovld __cnfn cos(float4); +float8 __ovld __cnfn cos(float8); +float16 __ovld __cnfn cos(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn cos(double); +double2 __ovld __cnfn cos(double2); +double3 __ovld __cnfn cos(double3); +double4 __ovld __cnfn cos(double4); +double8 __ovld __cnfn cos(double8); +double16 __ovld __cnfn cos(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn cos(half); +half2 __ovld __cnfn cos(half2); +half3 __ovld __cnfn cos(half3); +half4 __ovld __cnfn cos(half4); +half8 __ovld __cnfn cos(half8); +half16 __ovld __cnfn cos(half16); +#endif //cl_khr_fp16 + +/** + * Compute hyperbolic cosine. + */ +float __ovld __cnfn cosh(float); +float2 __ovld __cnfn cosh(float2); +float3 __ovld __cnfn cosh(float3); +float4 __ovld __cnfn cosh(float4); +float8 __ovld __cnfn cosh(float8); +float16 __ovld __cnfn cosh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn cosh(double); +double2 __ovld __cnfn cosh(double2); +double3 __ovld __cnfn cosh(double3); +double4 __ovld __cnfn cosh(double4); +double8 __ovld __cnfn cosh(double8); +double16 __ovld __cnfn cosh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn cosh(half); +half2 __ovld __cnfn cosh(half2); +half3 __ovld __cnfn cosh(half3); +half4 __ovld __cnfn cosh(half4); +half8 __ovld __cnfn cosh(half8); +half16 __ovld __cnfn cosh(half16); +#endif //cl_khr_fp16 + +/** + * Compute cos (PI * x). 
+ */ +float __ovld __cnfn cospi(float x); +float2 __ovld __cnfn cospi(float2 x); +float3 __ovld __cnfn cospi(float3 x); +float4 __ovld __cnfn cospi(float4 x); +float8 __ovld __cnfn cospi(float8 x); +float16 __ovld __cnfn cospi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn cospi(double x); +double2 __ovld __cnfn cospi(double2 x); +double3 __ovld __cnfn cospi(double3 x); +double4 __ovld __cnfn cospi(double4 x); +double8 __ovld __cnfn cospi(double8 x); +double16 __ovld __cnfn cospi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn cospi(half x); +half2 __ovld __cnfn cospi(half2 x); +half3 __ovld __cnfn cospi(half3 x); +half4 __ovld __cnfn cospi(half4 x); +half8 __ovld __cnfn cospi(half8 x); +half16 __ovld __cnfn cospi(half16 x); +#endif //cl_khr_fp16 + +/** + * Complementary error function. + */ +float __ovld __cnfn erfc(float); +float2 __ovld __cnfn erfc(float2); +float3 __ovld __cnfn erfc(float3); +float4 __ovld __cnfn erfc(float4); +float8 __ovld __cnfn erfc(float8); +float16 __ovld __cnfn erfc(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn erfc(double); +double2 __ovld __cnfn erfc(double2); +double3 __ovld __cnfn erfc(double3); +double4 __ovld __cnfn erfc(double4); +double8 __ovld __cnfn erfc(double8); +double16 __ovld __cnfn erfc(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn erfc(half); +half2 __ovld __cnfn erfc(half2); +half3 __ovld __cnfn erfc(half3); +half4 __ovld __cnfn erfc(half4); +half8 __ovld __cnfn erfc(half8); +half16 __ovld __cnfn erfc(half16); +#endif //cl_khr_fp16 + +/** + * Error function encountered in integrating the + * normal distribution. + */ +float __ovld __cnfn erf(float); +float2 __ovld __cnfn erf(float2); +float3 __ovld __cnfn erf(float3); +float4 __ovld __cnfn erf(float4); +float8 __ovld __cnfn erf(float8); +float16 __ovld __cnfn erf(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn erf(double); +double2 __ovld __cnfn erf(double2); +double3 __ovld __cnfn erf(double3); +double4 __ovld __cnfn erf(double4); +double8 __ovld __cnfn erf(double8); +double16 __ovld __cnfn erf(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn erf(half); +half2 __ovld __cnfn erf(half2); +half3 __ovld __cnfn erf(half3); +half4 __ovld __cnfn erf(half4); +half8 __ovld __cnfn erf(half8); +half16 __ovld __cnfn erf(half16); +#endif //cl_khr_fp16 + +/** + * Compute the base e exponential function of x. + */ +float __ovld __cnfn exp(float x); +float2 __ovld __cnfn exp(float2 x); +float3 __ovld __cnfn exp(float3 x); +float4 __ovld __cnfn exp(float4 x); +float8 __ovld __cnfn exp(float8 x); +float16 __ovld __cnfn exp(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn exp(double x); +double2 __ovld __cnfn exp(double2 x); +double3 __ovld __cnfn exp(double3 x); +double4 __ovld __cnfn exp(double4 x); +double8 __ovld __cnfn exp(double8 x); +double16 __ovld __cnfn exp(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn exp(half x); +half2 __ovld __cnfn exp(half2 x); +half3 __ovld __cnfn exp(half3 x); +half4 __ovld __cnfn exp(half4 x); +half8 __ovld __cnfn exp(half8 x); +half16 __ovld __cnfn exp(half16 x); +#endif //cl_khr_fp16 + +/** + * Exponential base 2 function. 
+ */ +float __ovld __cnfn exp2(float); +float2 __ovld __cnfn exp2(float2); +float3 __ovld __cnfn exp2(float3); +float4 __ovld __cnfn exp2(float4); +float8 __ovld __cnfn exp2(float8); +float16 __ovld __cnfn exp2(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn exp2(double); +double2 __ovld __cnfn exp2(double2); +double3 __ovld __cnfn exp2(double3); +double4 __ovld __cnfn exp2(double4); +double8 __ovld __cnfn exp2(double8); +double16 __ovld __cnfn exp2(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn exp2(half); +half2 __ovld __cnfn exp2(half2); +half3 __ovld __cnfn exp2(half3); +half4 __ovld __cnfn exp2(half4); +half8 __ovld __cnfn exp2(half8); +half16 __ovld __cnfn exp2(half16); +#endif //cl_khr_fp16 + +/** + * Exponential base 10 function. + */ +float __ovld __cnfn exp10(float); +float2 __ovld __cnfn exp10(float2); +float3 __ovld __cnfn exp10(float3); +float4 __ovld __cnfn exp10(float4); +float8 __ovld __cnfn exp10(float8); +float16 __ovld __cnfn exp10(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn exp10(double); +double2 __ovld __cnfn exp10(double2); +double3 __ovld __cnfn exp10(double3); +double4 __ovld __cnfn exp10(double4); +double8 __ovld __cnfn exp10(double8); +double16 __ovld __cnfn exp10(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn exp10(half); +half2 __ovld __cnfn exp10(half2); +half3 __ovld __cnfn exp10(half3); +half4 __ovld __cnfn exp10(half4); +half8 __ovld __cnfn exp10(half8); +half16 __ovld __cnfn exp10(half16); +#endif //cl_khr_fp16 + +/** + * Compute e^x- 1.0. + */ +float __ovld __cnfn expm1(float x); +float2 __ovld __cnfn expm1(float2 x); +float3 __ovld __cnfn expm1(float3 x); +float4 __ovld __cnfn expm1(float4 x); +float8 __ovld __cnfn expm1(float8 x); +float16 __ovld __cnfn expm1(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn expm1(double x); +double2 __ovld __cnfn expm1(double2 x); +double3 __ovld __cnfn expm1(double3 x); +double4 __ovld __cnfn expm1(double4 x); +double8 __ovld __cnfn expm1(double8 x); +double16 __ovld __cnfn expm1(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn expm1(half x); +half2 __ovld __cnfn expm1(half2 x); +half3 __ovld __cnfn expm1(half3 x); +half4 __ovld __cnfn expm1(half4 x); +half8 __ovld __cnfn expm1(half8 x); +half16 __ovld __cnfn expm1(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute absolute value of a floating-point number. + */ +float __ovld __cnfn fabs(float); +float2 __ovld __cnfn fabs(float2); +float3 __ovld __cnfn fabs(float3); +float4 __ovld __cnfn fabs(float4); +float8 __ovld __cnfn fabs(float8); +float16 __ovld __cnfn fabs(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn fabs(double); +double2 __ovld __cnfn fabs(double2); +double3 __ovld __cnfn fabs(double3); +double4 __ovld __cnfn fabs(double4); +double8 __ovld __cnfn fabs(double8); +double16 __ovld __cnfn fabs(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fabs(half); +half2 __ovld __cnfn fabs(half2); +half3 __ovld __cnfn fabs(half3); +half4 __ovld __cnfn fabs(half4); +half8 __ovld __cnfn fabs(half8); +half16 __ovld __cnfn fabs(half16); +#endif //cl_khr_fp16 + +/** + * x - y if x > y, +0 if x is less than or equal to y. 
+ */ +float __ovld __cnfn fdim(float x, float y); +float2 __ovld __cnfn fdim(float2 x, float2 y); +float3 __ovld __cnfn fdim(float3 x, float3 y); +float4 __ovld __cnfn fdim(float4 x, float4 y); +float8 __ovld __cnfn fdim(float8 x, float8 y); +float16 __ovld __cnfn fdim(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn fdim(double x, double y); +double2 __ovld __cnfn fdim(double2 x, double2 y); +double3 __ovld __cnfn fdim(double3 x, double3 y); +double4 __ovld __cnfn fdim(double4 x, double4 y); +double8 __ovld __cnfn fdim(double8 x, double8 y); +double16 __ovld __cnfn fdim(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fdim(half x, half y); +half2 __ovld __cnfn fdim(half2 x, half2 y); +half3 __ovld __cnfn fdim(half3 x, half3 y); +half4 __ovld __cnfn fdim(half4 x, half4 y); +half8 __ovld __cnfn fdim(half8 x, half8 y); +half16 __ovld __cnfn fdim(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Round to integral value using the round to -ve + * infinity rounding mode. + */ +float __ovld __cnfn floor(float); +float2 __ovld __cnfn floor(float2); +float3 __ovld __cnfn floor(float3); +float4 __ovld __cnfn floor(float4); +float8 __ovld __cnfn floor(float8); +float16 __ovld __cnfn floor(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn floor(double); +double2 __ovld __cnfn floor(double2); +double3 __ovld __cnfn floor(double3); +double4 __ovld __cnfn floor(double4); +double8 __ovld __cnfn floor(double8); +double16 __ovld __cnfn floor(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn floor(half); +half2 __ovld __cnfn floor(half2); +half3 __ovld __cnfn floor(half3); +half4 __ovld __cnfn floor(half4); +half8 __ovld __cnfn floor(half8); +half16 __ovld __cnfn floor(half16); +#endif //cl_khr_fp16 + +/** + * Returns the correctly rounded floating-point + * representation of the sum of c with the infinitely + * precise product of a and b. Rounding of + * intermediate products shall not occur. Edge case + * behavior is per the IEEE 754-2008 standard. + */ +float __ovld __cnfn fma(float a, float b, float c); +float2 __ovld __cnfn fma(float2 a, float2 b, float2 c); +float3 __ovld __cnfn fma(float3 a, float3 b, float3 c); +float4 __ovld __cnfn fma(float4 a, float4 b, float4 c); +float8 __ovld __cnfn fma(float8 a, float8 b, float8 c); +float16 __ovld __cnfn fma(float16 a, float16 b, float16 c); +#ifdef cl_khr_fp64 +double __ovld __cnfn fma(double a, double b, double c); +double2 __ovld __cnfn fma(double2 a, double2 b, double2 c); +double3 __ovld __cnfn fma(double3 a, double3 b, double3 c); +double4 __ovld __cnfn fma(double4 a, double4 b, double4 c); +double8 __ovld __cnfn fma(double8 a, double8 b, double8 c); +double16 __ovld __cnfn fma(double16 a, double16 b, double16 c); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fma(half a, half b, half c); +half2 __ovld __cnfn fma(half2 a, half2 b, half2 c); +half3 __ovld __cnfn fma(half3 a, half3 b, half3 c); +half4 __ovld __cnfn fma(half4 a, half4 b, half4 c); +half8 __ovld __cnfn fma(half8 a, half8 b, half8 c); +half16 __ovld __cnfn fma(half16 a, half16 b, half16 c); +#endif //cl_khr_fp16 + +/** + * Returns y if x < y, otherwise it returns x. If one + * argument is a NaN, fmax() returns the other + * argument. If both arguments are NaNs, fmax() + * returns a NaN. 
+ */ +float __ovld __cnfn fmax(float x, float y); +float2 __ovld __cnfn fmax(float2 x, float2 y); +float3 __ovld __cnfn fmax(float3 x, float3 y); +float4 __ovld __cnfn fmax(float4 x, float4 y); +float8 __ovld __cnfn fmax(float8 x, float8 y); +float16 __ovld __cnfn fmax(float16 x, float16 y); +float2 __ovld __cnfn fmax(float2 x, float y); +float3 __ovld __cnfn fmax(float3 x, float y); +float4 __ovld __cnfn fmax(float4 x, float y); +float8 __ovld __cnfn fmax(float8 x, float y); +float16 __ovld __cnfn fmax(float16 x, float y); +#ifdef cl_khr_fp64 +double __ovld __cnfn fmax(double x, double y); +double2 __ovld __cnfn fmax(double2 x, double2 y); +double3 __ovld __cnfn fmax(double3 x, double3 y); +double4 __ovld __cnfn fmax(double4 x, double4 y); +double8 __ovld __cnfn fmax(double8 x, double8 y); +double16 __ovld __cnfn fmax(double16 x, double16 y); +double2 __ovld __cnfn fmax(double2 x, double y); +double3 __ovld __cnfn fmax(double3 x, double y); +double4 __ovld __cnfn fmax(double4 x, double y); +double8 __ovld __cnfn fmax(double8 x, double y); +double16 __ovld __cnfn fmax(double16 x, double y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fmax(half x, half y); +half2 __ovld __cnfn fmax(half2 x, half2 y); +half3 __ovld __cnfn fmax(half3 x, half3 y); +half4 __ovld __cnfn fmax(half4 x, half4 y); +half8 __ovld __cnfn fmax(half8 x, half8 y); +half16 __ovld __cnfn fmax(half16 x, half16 y); +half2 __ovld __cnfn fmax(half2 x, half y); +half3 __ovld __cnfn fmax(half3 x, half y); +half4 __ovld __cnfn fmax(half4 x, half y); +half8 __ovld __cnfn fmax(half8 x, half y); +half16 __ovld __cnfn fmax(half16 x, half y); +#endif //cl_khr_fp16 + +/** + * Returns y if y < x, otherwise it returns x. If one + * argument is a NaN, fmin() returns the other + * argument. If both arguments are NaNs, fmin() + * returns a NaN. + */ +float __ovld __cnfn fmin(float x, float y); +float2 __ovld __cnfn fmin(float2 x, float2 y); +float3 __ovld __cnfn fmin(float3 x, float3 y); +float4 __ovld __cnfn fmin(float4 x, float4 y); +float8 __ovld __cnfn fmin(float8 x, float8 y); +float16 __ovld __cnfn fmin(float16 x, float16 y); +float2 __ovld __cnfn fmin(float2 x, float y); +float3 __ovld __cnfn fmin(float3 x, float y); +float4 __ovld __cnfn fmin(float4 x, float y); +float8 __ovld __cnfn fmin(float8 x, float y); +float16 __ovld __cnfn fmin(float16 x, float y); +#ifdef cl_khr_fp64 +double __ovld __cnfn fmin(double x, double y); +double2 __ovld __cnfn fmin(double2 x, double2 y); +double3 __ovld __cnfn fmin(double3 x, double3 y); +double4 __ovld __cnfn fmin(double4 x, double4 y); +double8 __ovld __cnfn fmin(double8 x, double8 y); +double16 __ovld __cnfn fmin(double16 x, double16 y); +double2 __ovld __cnfn fmin(double2 x, double y); +double3 __ovld __cnfn fmin(double3 x, double y); +double4 __ovld __cnfn fmin(double4 x, double y); +double8 __ovld __cnfn fmin(double8 x, double y); +double16 __ovld __cnfn fmin(double16 x, double y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fmin(half x, half y); +half2 __ovld __cnfn fmin(half2 x, half2 y); +half3 __ovld __cnfn fmin(half3 x, half3 y); +half4 __ovld __cnfn fmin(half4 x, half4 y); +half8 __ovld __cnfn fmin(half8 x, half8 y); +half16 __ovld __cnfn fmin(half16 x, half16 y); +half2 __ovld __cnfn fmin(half2 x, half y); +half3 __ovld __cnfn fmin(half3 x, half y); +half4 __ovld __cnfn fmin(half4 x, half y); +half8 __ovld __cnfn fmin(half8 x, half y); +half16 __ovld __cnfn fmin(half16 x, half y); +#endif //cl_khr_fp16 + +/** + * Modulus. Returns x - y * trunc (x/y). 
+ */ +float __ovld __cnfn fmod(float x, float y); +float2 __ovld __cnfn fmod(float2 x, float2 y); +float3 __ovld __cnfn fmod(float3 x, float3 y); +float4 __ovld __cnfn fmod(float4 x, float4 y); +float8 __ovld __cnfn fmod(float8 x, float8 y); +float16 __ovld __cnfn fmod(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn fmod(double x, double y); +double2 __ovld __cnfn fmod(double2 x, double2 y); +double3 __ovld __cnfn fmod(double3 x, double3 y); +double4 __ovld __cnfn fmod(double4 x, double4 y); +double8 __ovld __cnfn fmod(double8 x, double8 y); +double16 __ovld __cnfn fmod(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn fmod(half x, half y); +half2 __ovld __cnfn fmod(half2 x, half2 y); +half3 __ovld __cnfn fmod(half3 x, half3 y); +half4 __ovld __cnfn fmod(half4 x, half4 y); +half8 __ovld __cnfn fmod(half8 x, half8 y); +half16 __ovld __cnfn fmod(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns fmin(x - floor (x), 0x1.fffffep-1f ). + * floor(x) is returned in iptr. + */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float __ovld fract(float x, float *iptr); +float2 __ovld fract(float2 x, float2 *iptr); +float3 __ovld fract(float3 x, float3 *iptr); +float4 __ovld fract(float4 x, float4 *iptr); +float8 __ovld fract(float8 x, float8 *iptr); +float16 __ovld fract(float16 x, float16 *iptr); +#ifdef cl_khr_fp64 +double __ovld fract(double x, double *iptr); +double2 __ovld fract(double2 x, double2 *iptr); +double3 __ovld fract(double3 x, double3 *iptr); +double4 __ovld fract(double4 x, double4 *iptr); +double8 __ovld fract(double8 x, double8 *iptr); +double16 __ovld fract(double16 x, double16 *iptr); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld fract(half x, half *iptr); +half2 __ovld fract(half2 x, half2 *iptr); +half3 __ovld fract(half3 x, half3 *iptr); +half4 __ovld fract(half4 x, half4 *iptr); +half8 __ovld fract(half8 x, half8 *iptr); +half16 __ovld fract(half16 x, half16 *iptr); +#endif //cl_khr_fp16 +#else +float __ovld fract(float x, __global float *iptr); +float2 __ovld fract(float2 x, __global float2 *iptr); +float3 __ovld fract(float3 x, __global float3 *iptr); +float4 __ovld fract(float4 x, __global float4 *iptr); +float8 __ovld fract(float8 x, __global float8 *iptr); +float16 __ovld fract(float16 x, __global float16 *iptr); +float __ovld fract(float x, __local float *iptr); +float2 __ovld fract(float2 x, __local float2 *iptr); +float3 __ovld fract(float3 x, __local float3 *iptr); +float4 __ovld fract(float4 x, __local float4 *iptr); +float8 __ovld fract(float8 x, __local float8 *iptr); +float16 __ovld fract(float16 x, __local float16 *iptr); +float __ovld fract(float x, __private float *iptr); +float2 __ovld fract(float2 x, __private float2 *iptr); +float3 __ovld fract(float3 x, __private float3 *iptr); +float4 __ovld fract(float4 x, __private float4 *iptr); +float8 __ovld fract(float8 x, __private float8 *iptr); +float16 __ovld fract(float16 x, __private float16 *iptr); +#ifdef cl_khr_fp64 +double __ovld fract(double x, __global double *iptr); +double2 __ovld fract(double2 x, __global double2 *iptr); +double3 __ovld fract(double3 x, __global double3 *iptr); +double4 __ovld fract(double4 x, __global double4 *iptr); +double8 __ovld fract(double8 x, __global double8 *iptr); +double16 __ovld fract(double16 x, __global double16 *iptr); +double __ovld fract(double x, __local double *iptr); +double2 __ovld fract(double2 x, __local double2 *iptr); +double3 __ovld fract(double3 x, __local double3 *iptr); +double4 __ovld 
fract(double4 x, __local double4 *iptr); +double8 __ovld fract(double8 x, __local double8 *iptr); +double16 __ovld fract(double16 x, __local double16 *iptr); +double __ovld fract(double x, __private double *iptr); +double2 __ovld fract(double2 x, __private double2 *iptr); +double3 __ovld fract(double3 x, __private double3 *iptr); +double4 __ovld fract(double4 x, __private double4 *iptr); +double8 __ovld fract(double8 x, __private double8 *iptr); +double16 __ovld fract(double16 x, __private double16 *iptr); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld fract(half x, __global half *iptr); +half2 __ovld fract(half2 x, __global half2 *iptr); +half3 __ovld fract(half3 x, __global half3 *iptr); +half4 __ovld fract(half4 x, __global half4 *iptr); +half8 __ovld fract(half8 x, __global half8 *iptr); +half16 __ovld fract(half16 x, __global half16 *iptr); +half __ovld fract(half x, __local half *iptr); +half2 __ovld fract(half2 x, __local half2 *iptr); +half3 __ovld fract(half3 x, __local half3 *iptr); +half4 __ovld fract(half4 x, __local half4 *iptr); +half8 __ovld fract(half8 x, __local half8 *iptr); +half16 __ovld fract(half16 x, __local half16 *iptr); +half __ovld fract(half x, __private half *iptr); +half2 __ovld fract(half2 x, __private half2 *iptr); +half3 __ovld fract(half3 x, __private half3 *iptr); +half4 __ovld fract(half4 x, __private half4 *iptr); +half8 __ovld fract(half8 x, __private half8 *iptr); +half16 __ovld fract(half16 x, __private half16 *iptr); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Extract mantissa and exponent from x. For each + * component the mantissa returned is a float with + * magnitude in the interval [1/2, 1) or 0. Each + * component of x equals mantissa returned * 2^exp. + */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float __ovld frexp(float x, int *exp); +float2 __ovld frexp(float2 x, int2 *exp); +float3 __ovld frexp(float3 x, int3 *exp); +float4 __ovld frexp(float4 x, int4 *exp); +float8 __ovld frexp(float8 x, int8 *exp); +float16 __ovld frexp(float16 x, int16 *exp); +#ifdef cl_khr_fp64 +double __ovld frexp(double x, int *exp); +double2 __ovld frexp(double2 x, int2 *exp); +double3 __ovld frexp(double3 x, int3 *exp); +double4 __ovld frexp(double4 x, int4 *exp); +double8 __ovld frexp(double8 x, int8 *exp); +double16 __ovld frexp(double16 x, int16 *exp); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld frexp(half x, int *exp); +half2 __ovld frexp(half2 x, int2 *exp); +half3 __ovld frexp(half3 x, int3 *exp); +half4 __ovld frexp(half4 x, int4 *exp); +half8 __ovld frexp(half8 x, int8 *exp); +half16 __ovld frexp(half16 x, int16 *exp); +#endif //cl_khr_fp16 +#else +float __ovld frexp(float x, __global int *exp); +float2 __ovld frexp(float2 x, __global int2 *exp); +float3 __ovld frexp(float3 x, __global int3 *exp); +float4 __ovld frexp(float4 x, __global int4 *exp); +float8 __ovld frexp(float8 x, __global int8 *exp); +float16 __ovld frexp(float16 x, __global int16 *exp); +float __ovld frexp(float x, __local int *exp); +float2 __ovld frexp(float2 x, __local int2 *exp); +float3 __ovld frexp(float3 x, __local int3 *exp); +float4 __ovld frexp(float4 x, __local int4 *exp); +float8 __ovld frexp(float8 x, __local int8 *exp); +float16 __ovld frexp(float16 x, __local int16 *exp); +float __ovld frexp(float x, __private int *exp); +float2 __ovld frexp(float2 x, __private int2 *exp); +float3 __ovld frexp(float3 x, __private int3 *exp); +float4 __ovld frexp(float4 x, __private int4 *exp); +float8 __ovld frexp(float8 x, 
__private int8 *exp); +float16 __ovld frexp(float16 x, __private int16 *exp); +#ifdef cl_khr_fp64 +double __ovld frexp(double x, __global int *exp); +double2 __ovld frexp(double2 x, __global int2 *exp); +double3 __ovld frexp(double3 x, __global int3 *exp); +double4 __ovld frexp(double4 x, __global int4 *exp); +double8 __ovld frexp(double8 x, __global int8 *exp); +double16 __ovld frexp(double16 x, __global int16 *exp); +double __ovld frexp(double x, __local int *exp); +double2 __ovld frexp(double2 x, __local int2 *exp); +double3 __ovld frexp(double3 x, __local int3 *exp); +double4 __ovld frexp(double4 x, __local int4 *exp); +double8 __ovld frexp(double8 x, __local int8 *exp); +double16 __ovld frexp(double16 x, __local int16 *exp); +double __ovld frexp(double x, __private int *exp); +double2 __ovld frexp(double2 x, __private int2 *exp); +double3 __ovld frexp(double3 x, __private int3 *exp); +double4 __ovld frexp(double4 x, __private int4 *exp); +double8 __ovld frexp(double8 x, __private int8 *exp); +double16 __ovld frexp(double16 x, __private int16 *exp); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld frexp(half x, __global int *exp); +half2 __ovld frexp(half2 x, __global int2 *exp); +half3 __ovld frexp(half3 x, __global int3 *exp); +half4 __ovld frexp(half4 x, __global int4 *exp); +half8 __ovld frexp(half8 x, __global int8 *exp); +half16 __ovld frexp(half16 x, __global int16 *exp); +half __ovld frexp(half x, __local int *exp); +half2 __ovld frexp(half2 x, __local int2 *exp); +half3 __ovld frexp(half3 x, __local int3 *exp); +half4 __ovld frexp(half4 x, __local int4 *exp); +half8 __ovld frexp(half8 x, __local int8 *exp); +half16 __ovld frexp(half16 x, __local int16 *exp); +half __ovld frexp(half x, __private int *exp); +half2 __ovld frexp(half2 x, __private int2 *exp); +half3 __ovld frexp(half3 x, __private int3 *exp); +half4 __ovld frexp(half4 x, __private int4 *exp); +half8 __ovld frexp(half8 x, __private int8 *exp); +half16 __ovld frexp(half16 x, __private int16 *exp); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Compute the value of the square root of x^2 + y^2 + * without undue overflow or underflow. + */ +float __ovld __cnfn hypot(float x, float y); +float2 __ovld __cnfn hypot(float2 x, float2 y); +float3 __ovld __cnfn hypot(float3 x, float3 y); +float4 __ovld __cnfn hypot(float4 x, float4 y); +float8 __ovld __cnfn hypot(float8 x, float8 y); +float16 __ovld __cnfn hypot(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn hypot(double x, double y); +double2 __ovld __cnfn hypot(double2 x, double2 y); +double3 __ovld __cnfn hypot(double3 x, double3 y); +double4 __ovld __cnfn hypot(double4 x, double4 y); +double8 __ovld __cnfn hypot(double8 x, double8 y); +double16 __ovld __cnfn hypot(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn hypot(half x, half y); +half2 __ovld __cnfn hypot(half2 x, half2 y); +half3 __ovld __cnfn hypot(half3 x, half3 y); +half4 __ovld __cnfn hypot(half4 x, half4 y); +half8 __ovld __cnfn hypot(half8 x, half8 y); +half16 __ovld __cnfn hypot(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Return the exponent as an integer value. 
+ */ +int __ovld __cnfn ilogb(float x); +int2 __ovld __cnfn ilogb(float2 x); +int3 __ovld __cnfn ilogb(float3 x); +int4 __ovld __cnfn ilogb(float4 x); +int8 __ovld __cnfn ilogb(float8 x); +int16 __ovld __cnfn ilogb(float16 x); +#ifdef cl_khr_fp64 +int __ovld __cnfn ilogb(double x); +int2 __ovld __cnfn ilogb(double2 x); +int3 __ovld __cnfn ilogb(double3 x); +int4 __ovld __cnfn ilogb(double4 x); +int8 __ovld __cnfn ilogb(double8 x); +int16 __ovld __cnfn ilogb(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn ilogb(half x); +int2 __ovld __cnfn ilogb(half2 x); +int3 __ovld __cnfn ilogb(half3 x); +int4 __ovld __cnfn ilogb(half4 x); +int8 __ovld __cnfn ilogb(half8 x); +int16 __ovld __cnfn ilogb(half16 x); +#endif //cl_khr_fp16 + +/** + * Multiply x by 2 to the power n. + */ +float __ovld __cnfn ldexp(float x, int n); +float2 __ovld __cnfn ldexp(float2 x, int2 n); +float3 __ovld __cnfn ldexp(float3 x, int3 n); +float4 __ovld __cnfn ldexp(float4 x, int4 n); +float8 __ovld __cnfn ldexp(float8 x, int8 n); +float16 __ovld __cnfn ldexp(float16 x, int16 n); +float2 __ovld __cnfn ldexp(float2 x, int n); +float3 __ovld __cnfn ldexp(float3 x, int n); +float4 __ovld __cnfn ldexp(float4 x, int n); +float8 __ovld __cnfn ldexp(float8 x, int n); +float16 __ovld __cnfn ldexp(float16 x, int n); +#ifdef cl_khr_fp64 +double __ovld __cnfn ldexp(double x, int n); +double2 __ovld __cnfn ldexp(double2 x, int2 n); +double3 __ovld __cnfn ldexp(double3 x, int3 n); +double4 __ovld __cnfn ldexp(double4 x, int4 n); +double8 __ovld __cnfn ldexp(double8 x, int8 n); +double16 __ovld __cnfn ldexp(double16 x, int16 n); +double2 __ovld __cnfn ldexp(double2 x, int n); +double3 __ovld __cnfn ldexp(double3 x, int n); +double4 __ovld __cnfn ldexp(double4 x, int n); +double8 __ovld __cnfn ldexp(double8 x, int n); +double16 __ovld __cnfn ldexp(double16 x, int n); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn ldexp(half x, int n); +half2 __ovld __cnfn ldexp(half2 x, int2 n); +half3 __ovld __cnfn ldexp(half3 x, int3 n); +half4 __ovld __cnfn ldexp(half4 x, int4 n); +half8 __ovld __cnfn ldexp(half8 x, int8 n); +half16 __ovld __cnfn ldexp(half16 x, int16 n); +half2 __ovld __cnfn ldexp(half2 x, int n); +half3 __ovld __cnfn ldexp(half3 x, int n); +half4 __ovld __cnfn ldexp(half4 x, int n); +half8 __ovld __cnfn ldexp(half8 x, int n); +half16 __ovld __cnfn ldexp(half16 x, int n); +#endif //cl_khr_fp16 + +/** + * Log gamma function. Returns the natural + * logarithm of the absolute value of the gamma + * function. The sign of the gamma function is + * returned in the signp argument of lgamma_r. 
+ */ +float __ovld __cnfn lgamma(float x); +float2 __ovld __cnfn lgamma(float2 x); +float3 __ovld __cnfn lgamma(float3 x); +float4 __ovld __cnfn lgamma(float4 x); +float8 __ovld __cnfn lgamma(float8 x); +float16 __ovld __cnfn lgamma(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn lgamma(double x); +double2 __ovld __cnfn lgamma(double2 x); +double3 __ovld __cnfn lgamma(double3 x); +double4 __ovld __cnfn lgamma(double4 x); +double8 __ovld __cnfn lgamma(double8 x); +double16 __ovld __cnfn lgamma(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn lgamma(half x); +half2 __ovld __cnfn lgamma(half2 x); +half3 __ovld __cnfn lgamma(half3 x); +half4 __ovld __cnfn lgamma(half4 x); +half8 __ovld __cnfn lgamma(half8 x); +half16 __ovld __cnfn lgamma(half16 x); +#endif //cl_khr_fp16 + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float __ovld lgamma_r(float x, int *signp); +float2 __ovld lgamma_r(float2 x, int2 *signp); +float3 __ovld lgamma_r(float3 x, int3 *signp); +float4 __ovld lgamma_r(float4 x, int4 *signp); +float8 __ovld lgamma_r(float8 x, int8 *signp); +float16 __ovld lgamma_r(float16 x, int16 *signp); +#ifdef cl_khr_fp64 +double __ovld lgamma_r(double x, int *signp); +double2 __ovld lgamma_r(double2 x, int2 *signp); +double3 __ovld lgamma_r(double3 x, int3 *signp); +double4 __ovld lgamma_r(double4 x, int4 *signp); +double8 __ovld lgamma_r(double8 x, int8 *signp); +double16 __ovld lgamma_r(double16 x, int16 *signp); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld lgamma_r(half x, int *signp); +half2 __ovld lgamma_r(half2 x, int2 *signp); +half3 __ovld lgamma_r(half3 x, int3 *signp); +half4 __ovld lgamma_r(half4 x, int4 *signp); +half8 __ovld lgamma_r(half8 x, int8 *signp); +half16 __ovld lgamma_r(half16 x, int16 *signp); +#endif //cl_khr_fp16 +#else +float __ovld lgamma_r(float x, __global int *signp); +float2 __ovld lgamma_r(float2 x, __global int2 *signp); +float3 __ovld lgamma_r(float3 x, __global int3 *signp); +float4 __ovld lgamma_r(float4 x, __global int4 *signp); +float8 __ovld lgamma_r(float8 x, __global int8 *signp); +float16 __ovld lgamma_r(float16 x, __global int16 *signp); +float __ovld lgamma_r(float x, __local int *signp); +float2 __ovld lgamma_r(float2 x, __local int2 *signp); +float3 __ovld lgamma_r(float3 x, __local int3 *signp); +float4 __ovld lgamma_r(float4 x, __local int4 *signp); +float8 __ovld lgamma_r(float8 x, __local int8 *signp); +float16 __ovld lgamma_r(float16 x, __local int16 *signp); +float __ovld lgamma_r(float x, __private int *signp); +float2 __ovld lgamma_r(float2 x, __private int2 *signp); +float3 __ovld lgamma_r(float3 x, __private int3 *signp); +float4 __ovld lgamma_r(float4 x, __private int4 *signp); +float8 __ovld lgamma_r(float8 x, __private int8 *signp); +float16 __ovld lgamma_r(float16 x, __private int16 *signp); +#ifdef cl_khr_fp64 +double __ovld lgamma_r(double x, __global int *signp); +double2 __ovld lgamma_r(double2 x, __global int2 *signp); +double3 __ovld lgamma_r(double3 x, __global int3 *signp); +double4 __ovld lgamma_r(double4 x, __global int4 *signp); +double8 __ovld lgamma_r(double8 x, __global int8 *signp); +double16 __ovld lgamma_r(double16 x, __global int16 *signp); +double __ovld lgamma_r(double x, __local int *signp); +double2 __ovld lgamma_r(double2 x, __local int2 *signp); +double3 __ovld lgamma_r(double3 x, __local int3 *signp); +double4 __ovld lgamma_r(double4 x, __local int4 *signp); +double8 __ovld lgamma_r(double8 x, __local int8 *signp); +double16 __ovld lgamma_r(double16 x, __local int16 
*signp); +double __ovld lgamma_r(double x, __private int *signp); +double2 __ovld lgamma_r(double2 x, __private int2 *signp); +double3 __ovld lgamma_r(double3 x, __private int3 *signp); +double4 __ovld lgamma_r(double4 x, __private int4 *signp); +double8 __ovld lgamma_r(double8 x, __private int8 *signp); +double16 __ovld lgamma_r(double16 x, __private int16 *signp); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld lgamma_r(half x, __global int *signp); +half2 __ovld lgamma_r(half2 x, __global int2 *signp); +half3 __ovld lgamma_r(half3 x, __global int3 *signp); +half4 __ovld lgamma_r(half4 x, __global int4 *signp); +half8 __ovld lgamma_r(half8 x, __global int8 *signp); +half16 __ovld lgamma_r(half16 x, __global int16 *signp); +half __ovld lgamma_r(half x, __local int *signp); +half2 __ovld lgamma_r(half2 x, __local int2 *signp); +half3 __ovld lgamma_r(half3 x, __local int3 *signp); +half4 __ovld lgamma_r(half4 x, __local int4 *signp); +half8 __ovld lgamma_r(half8 x, __local int8 *signp); +half16 __ovld lgamma_r(half16 x, __local int16 *signp); +half __ovld lgamma_r(half x, __private int *signp); +half2 __ovld lgamma_r(half2 x, __private int2 *signp); +half3 __ovld lgamma_r(half3 x, __private int3 *signp); +half4 __ovld lgamma_r(half4 x, __private int4 *signp); +half8 __ovld lgamma_r(half8 x, __private int8 *signp); +half16 __ovld lgamma_r(half16 x, __private int16 *signp); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Compute natural logarithm. + */ +float __ovld __cnfn log(float); +float2 __ovld __cnfn log(float2); +float3 __ovld __cnfn log(float3); +float4 __ovld __cnfn log(float4); +float8 __ovld __cnfn log(float8); +float16 __ovld __cnfn log(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn log(double); +double2 __ovld __cnfn log(double2); +double3 __ovld __cnfn log(double3); +double4 __ovld __cnfn log(double4); +double8 __ovld __cnfn log(double8); +double16 __ovld __cnfn log(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn log(half); +half2 __ovld __cnfn log(half2); +half3 __ovld __cnfn log(half3); +half4 __ovld __cnfn log(half4); +half8 __ovld __cnfn log(half8); +half16 __ovld __cnfn log(half16); +#endif //cl_khr_fp16 + +/** + * Compute a base 2 logarithm. + */ +float __ovld __cnfn log2(float); +float2 __ovld __cnfn log2(float2); +float3 __ovld __cnfn log2(float3); +float4 __ovld __cnfn log2(float4); +float8 __ovld __cnfn log2(float8); +float16 __ovld __cnfn log2(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn log2(double); +double2 __ovld __cnfn log2(double2); +double3 __ovld __cnfn log2(double3); +double4 __ovld __cnfn log2(double4); +double8 __ovld __cnfn log2(double8); +double16 __ovld __cnfn log2(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn log2(half); +half2 __ovld __cnfn log2(half2); +half3 __ovld __cnfn log2(half3); +half4 __ovld __cnfn log2(half4); +half8 __ovld __cnfn log2(half8); +half16 __ovld __cnfn log2(half16); +#endif //cl_khr_fp16 + +/** + * Compute a base 10 logarithm. 
+ */ +float __ovld __cnfn log10(float); +float2 __ovld __cnfn log10(float2); +float3 __ovld __cnfn log10(float3); +float4 __ovld __cnfn log10(float4); +float8 __ovld __cnfn log10(float8); +float16 __ovld __cnfn log10(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn log10(double); +double2 __ovld __cnfn log10(double2); +double3 __ovld __cnfn log10(double3); +double4 __ovld __cnfn log10(double4); +double8 __ovld __cnfn log10(double8); +double16 __ovld __cnfn log10(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn log10(half); +half2 __ovld __cnfn log10(half2); +half3 __ovld __cnfn log10(half3); +half4 __ovld __cnfn log10(half4); +half8 __ovld __cnfn log10(half8); +half16 __ovld __cnfn log10(half16); +#endif //cl_khr_fp16 + +/** + * Compute a base e logarithm of (1.0 + x). + */ +float __ovld __cnfn log1p(float x); +float2 __ovld __cnfn log1p(float2 x); +float3 __ovld __cnfn log1p(float3 x); +float4 __ovld __cnfn log1p(float4 x); +float8 __ovld __cnfn log1p(float8 x); +float16 __ovld __cnfn log1p(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn log1p(double x); +double2 __ovld __cnfn log1p(double2 x); +double3 __ovld __cnfn log1p(double3 x); +double4 __ovld __cnfn log1p(double4 x); +double8 __ovld __cnfn log1p(double8 x); +double16 __ovld __cnfn log1p(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn log1p(half x); +half2 __ovld __cnfn log1p(half2 x); +half3 __ovld __cnfn log1p(half3 x); +half4 __ovld __cnfn log1p(half4 x); +half8 __ovld __cnfn log1p(half8 x); +half16 __ovld __cnfn log1p(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute the exponent of x, which is the integral + * part of logr | x |. + */ +float __ovld __cnfn logb(float x); +float2 __ovld __cnfn logb(float2 x); +float3 __ovld __cnfn logb(float3 x); +float4 __ovld __cnfn logb(float4 x); +float8 __ovld __cnfn logb(float8 x); +float16 __ovld __cnfn logb(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn logb(double x); +double2 __ovld __cnfn logb(double2 x); +double3 __ovld __cnfn logb(double3 x); +double4 __ovld __cnfn logb(double4 x); +double8 __ovld __cnfn logb(double8 x); +double16 __ovld __cnfn logb(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn logb(half x); +half2 __ovld __cnfn logb(half2 x); +half3 __ovld __cnfn logb(half3 x); +half4 __ovld __cnfn logb(half4 x); +half8 __ovld __cnfn logb(half8 x); +half16 __ovld __cnfn logb(half16 x); +#endif //cl_khr_fp16 + +/** + * mad approximates a * b + c. Whether or how the + * product of a * b is rounded and how supernormal or + * subnormal intermediate products are handled is not + * defined. mad is intended to be used where speed is + * preferred over accuracy. 
+ */ +float __ovld __cnfn mad(float a, float b, float c); +float2 __ovld __cnfn mad(float2 a, float2 b, float2 c); +float3 __ovld __cnfn mad(float3 a, float3 b, float3 c); +float4 __ovld __cnfn mad(float4 a, float4 b, float4 c); +float8 __ovld __cnfn mad(float8 a, float8 b, float8 c); +float16 __ovld __cnfn mad(float16 a, float16 b, float16 c); +#ifdef cl_khr_fp64 +double __ovld __cnfn mad(double a, double b, double c); +double2 __ovld __cnfn mad(double2 a, double2 b, double2 c); +double3 __ovld __cnfn mad(double3 a, double3 b, double3 c); +double4 __ovld __cnfn mad(double4 a, double4 b, double4 c); +double8 __ovld __cnfn mad(double8 a, double8 b, double8 c); +double16 __ovld __cnfn mad(double16 a, double16 b, double16 c); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn mad(half a, half b, half c); +half2 __ovld __cnfn mad(half2 a, half2 b, half2 c); +half3 __ovld __cnfn mad(half3 a, half3 b, half3 c); +half4 __ovld __cnfn mad(half4 a, half4 b, half4 c); +half8 __ovld __cnfn mad(half8 a, half8 b, half8 c); +half16 __ovld __cnfn mad(half16 a, half16 b, half16 c); +#endif //cl_khr_fp16 + +/** + * Returns x if | x | > | y |, y if | y | > | x |, otherwise + * fmax(x, y). + */ +float __ovld __cnfn maxmag(float x, float y); +float2 __ovld __cnfn maxmag(float2 x, float2 y); +float3 __ovld __cnfn maxmag(float3 x, float3 y); +float4 __ovld __cnfn maxmag(float4 x, float4 y); +float8 __ovld __cnfn maxmag(float8 x, float8 y); +float16 __ovld __cnfn maxmag(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn maxmag(double x, double y); +double2 __ovld __cnfn maxmag(double2 x, double2 y); +double3 __ovld __cnfn maxmag(double3 x, double3 y); +double4 __ovld __cnfn maxmag(double4 x, double4 y); +double8 __ovld __cnfn maxmag(double8 x, double8 y); +double16 __ovld __cnfn maxmag(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn maxmag(half x, half y); +half2 __ovld __cnfn maxmag(half2 x, half2 y); +half3 __ovld __cnfn maxmag(half3 x, half3 y); +half4 __ovld __cnfn maxmag(half4 x, half4 y); +half8 __ovld __cnfn maxmag(half8 x, half8 y); +half16 __ovld __cnfn maxmag(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns x if | x | < | y |, y if | y | < | x |, otherwise + * fmin(x, y). + */ +float __ovld __cnfn minmag(float x, float y); +float2 __ovld __cnfn minmag(float2 x, float2 y); +float3 __ovld __cnfn minmag(float3 x, float3 y); +float4 __ovld __cnfn minmag(float4 x, float4 y); +float8 __ovld __cnfn minmag(float8 x, float8 y); +float16 __ovld __cnfn minmag(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn minmag(double x, double y); +double2 __ovld __cnfn minmag(double2 x, double2 y); +double3 __ovld __cnfn minmag(double3 x, double3 y); +double4 __ovld __cnfn minmag(double4 x, double4 y); +double8 __ovld __cnfn minmag(double8 x, double8 y); +double16 __ovld __cnfn minmag(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn minmag(half x, half y); +half2 __ovld __cnfn minmag(half2 x, half2 y); +half3 __ovld __cnfn minmag(half3 x, half3 y); +half4 __ovld __cnfn minmag(half4 x, half4 y); +half8 __ovld __cnfn minmag(half8 x, half8 y); +half16 __ovld __cnfn minmag(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Decompose a floating-point number. The modf + * function breaks the argument x into integral and + * fractional parts, each of which has the same sign as + * the argument. It stores the integral part in the object + * pointed to by iptr. 
+ */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float __ovld modf(float x, float *iptr); +float2 __ovld modf(float2 x, float2 *iptr); +float3 __ovld modf(float3 x, float3 *iptr); +float4 __ovld modf(float4 x, float4 *iptr); +float8 __ovld modf(float8 x, float8 *iptr); +float16 __ovld modf(float16 x, float16 *iptr); +#ifdef cl_khr_fp64 +double __ovld modf(double x, double *iptr); +double2 __ovld modf(double2 x, double2 *iptr); +double3 __ovld modf(double3 x, double3 *iptr); +double4 __ovld modf(double4 x, double4 *iptr); +double8 __ovld modf(double8 x, double8 *iptr); +double16 __ovld modf(double16 x, double16 *iptr); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld modf(half x, half *iptr); +half2 __ovld modf(half2 x, half2 *iptr); +half3 __ovld modf(half3 x, half3 *iptr); +half4 __ovld modf(half4 x, half4 *iptr); +half8 __ovld modf(half8 x, half8 *iptr); +half16 __ovld modf(half16 x, half16 *iptr); +#endif //cl_khr_fp16 +#else +float __ovld modf(float x, __global float *iptr); +float2 __ovld modf(float2 x, __global float2 *iptr); +float3 __ovld modf(float3 x, __global float3 *iptr); +float4 __ovld modf(float4 x, __global float4 *iptr); +float8 __ovld modf(float8 x, __global float8 *iptr); +float16 __ovld modf(float16 x, __global float16 *iptr); +float __ovld modf(float x, __local float *iptr); +float2 __ovld modf(float2 x, __local float2 *iptr); +float3 __ovld modf(float3 x, __local float3 *iptr); +float4 __ovld modf(float4 x, __local float4 *iptr); +float8 __ovld modf(float8 x, __local float8 *iptr); +float16 __ovld modf(float16 x, __local float16 *iptr); +float __ovld modf(float x, __private float *iptr); +float2 __ovld modf(float2 x, __private float2 *iptr); +float3 __ovld modf(float3 x, __private float3 *iptr); +float4 __ovld modf(float4 x, __private float4 *iptr); +float8 __ovld modf(float8 x, __private float8 *iptr); +float16 __ovld modf(float16 x, __private float16 *iptr); +#ifdef cl_khr_fp64 +double __ovld modf(double x, __global double *iptr); +double2 __ovld modf(double2 x, __global double2 *iptr); +double3 __ovld modf(double3 x, __global double3 *iptr); +double4 __ovld modf(double4 x, __global double4 *iptr); +double8 __ovld modf(double8 x, __global double8 *iptr); +double16 __ovld modf(double16 x, __global double16 *iptr); +double __ovld modf(double x, __local double *iptr); +double2 __ovld modf(double2 x, __local double2 *iptr); +double3 __ovld modf(double3 x, __local double3 *iptr); +double4 __ovld modf(double4 x, __local double4 *iptr); +double8 __ovld modf(double8 x, __local double8 *iptr); +double16 __ovld modf(double16 x, __local double16 *iptr); +double __ovld modf(double x, __private double *iptr); +double2 __ovld modf(double2 x, __private double2 *iptr); +double3 __ovld modf(double3 x, __private double3 *iptr); +double4 __ovld modf(double4 x, __private double4 *iptr); +double8 __ovld modf(double8 x, __private double8 *iptr); +double16 __ovld modf(double16 x, __private double16 *iptr); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld modf(half x, __global half *iptr); +half2 __ovld modf(half2 x, __global half2 *iptr); +half3 __ovld modf(half3 x, __global half3 *iptr); +half4 __ovld modf(half4 x, __global half4 *iptr); +half8 __ovld modf(half8 x, __global half8 *iptr); +half16 __ovld modf(half16 x, __global half16 *iptr); +half __ovld modf(half x, __local half *iptr); +half2 __ovld modf(half2 x, __local half2 *iptr); +half3 __ovld modf(half3 x, __local half3 *iptr); +half4 __ovld modf(half4 x, __local half4 *iptr); +half8 __ovld modf(half8 x, __local half8 
*iptr); +half16 __ovld modf(half16 x, __local half16 *iptr); +half __ovld modf(half x, __private half *iptr); +half2 __ovld modf(half2 x, __private half2 *iptr); +half3 __ovld modf(half3 x, __private half3 *iptr); +half4 __ovld modf(half4 x, __private half4 *iptr); +half8 __ovld modf(half8 x, __private half8 *iptr); +half16 __ovld modf(half16 x, __private half16 *iptr); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Returns a quiet NaN. The nancode may be placed + * in the significand of the resulting NaN. + */ +float __ovld __cnfn nan(uint nancode); +float2 __ovld __cnfn nan(uint2 nancode); +float3 __ovld __cnfn nan(uint3 nancode); +float4 __ovld __cnfn nan(uint4 nancode); +float8 __ovld __cnfn nan(uint8 nancode); +float16 __ovld __cnfn nan(uint16 nancode); +#ifdef cl_khr_fp64 +double __ovld __cnfn nan(ulong nancode); +double2 __ovld __cnfn nan(ulong2 nancode); +double3 __ovld __cnfn nan(ulong3 nancode); +double4 __ovld __cnfn nan(ulong4 nancode); +double8 __ovld __cnfn nan(ulong8 nancode); +double16 __ovld __cnfn nan(ulong16 nancode); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn nan(ushort nancode); +half2 __ovld __cnfn nan(ushort2 nancode); +half3 __ovld __cnfn nan(ushort3 nancode); +half4 __ovld __cnfn nan(ushort4 nancode); +half8 __ovld __cnfn nan(ushort8 nancode); +half16 __ovld __cnfn nan(ushort16 nancode); +#endif //cl_khr_fp16 + +/** + * Computes the next representable single-precision + * floating-point value following x in the direction of + * y. Thus, if y is less than x, nextafter() returns the + * largest representable floating-point number less + * than x. + */ +float __ovld __cnfn nextafter(float x, float y); +float2 __ovld __cnfn nextafter(float2 x, float2 y); +float3 __ovld __cnfn nextafter(float3 x, float3 y); +float4 __ovld __cnfn nextafter(float4 x, float4 y); +float8 __ovld __cnfn nextafter(float8 x, float8 y); +float16 __ovld __cnfn nextafter(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn nextafter(double x, double y); +double2 __ovld __cnfn nextafter(double2 x, double2 y); +double3 __ovld __cnfn nextafter(double3 x, double3 y); +double4 __ovld __cnfn nextafter(double4 x, double4 y); +double8 __ovld __cnfn nextafter(double8 x, double8 y); +double16 __ovld __cnfn nextafter(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn nextafter(half x, half y); +half2 __ovld __cnfn nextafter(half2 x, half2 y); +half3 __ovld __cnfn nextafter(half3 x, half3 y); +half4 __ovld __cnfn nextafter(half4 x, half4 y); +half8 __ovld __cnfn nextafter(half8 x, half8 y); +half16 __ovld __cnfn nextafter(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Compute x to the power y. 
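+ * For example, pow(2.0f, -3.0f) is 0.125f; for negative x the result is a NaN unless y is an integer value (see pown and powr below).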
+ */ +float __ovld __cnfn pow(float x, float y); +float2 __ovld __cnfn pow(float2 x, float2 y); +float3 __ovld __cnfn pow(float3 x, float3 y); +float4 __ovld __cnfn pow(float4 x, float4 y); +float8 __ovld __cnfn pow(float8 x, float8 y); +float16 __ovld __cnfn pow(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn pow(double x, double y); +double2 __ovld __cnfn pow(double2 x, double2 y); +double3 __ovld __cnfn pow(double3 x, double3 y); +double4 __ovld __cnfn pow(double4 x, double4 y); +double8 __ovld __cnfn pow(double8 x, double8 y); +double16 __ovld __cnfn pow(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn pow(half x, half y); +half2 __ovld __cnfn pow(half2 x, half2 y); +half3 __ovld __cnfn pow(half3 x, half3 y); +half4 __ovld __cnfn pow(half4 x, half4 y); +half8 __ovld __cnfn pow(half8 x, half8 y); +half16 __ovld __cnfn pow(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Compute x to the power y, where y is an integer. + */ +float __ovld __cnfn pown(float x, int y); +float2 __ovld __cnfn pown(float2 x, int2 y); +float3 __ovld __cnfn pown(float3 x, int3 y); +float4 __ovld __cnfn pown(float4 x, int4 y); +float8 __ovld __cnfn pown(float8 x, int8 y); +float16 __ovld __cnfn pown(float16 x, int16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn pown(double x, int y); +double2 __ovld __cnfn pown(double2 x, int2 y); +double3 __ovld __cnfn pown(double3 x, int3 y); +double4 __ovld __cnfn pown(double4 x, int4 y); +double8 __ovld __cnfn pown(double8 x, int8 y); +double16 __ovld __cnfn pown(double16 x, int16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn pown(half x, int y); +half2 __ovld __cnfn pown(half2 x, int2 y); +half3 __ovld __cnfn pown(half3 x, int3 y); +half4 __ovld __cnfn pown(half4 x, int4 y); +half8 __ovld __cnfn pown(half8 x, int8 y); +half16 __ovld __cnfn pown(half16 x, int16 y); +#endif //cl_khr_fp16 + +/** + * Compute x to the power y, where x is >= 0. + */ +float __ovld __cnfn powr(float x, float y); +float2 __ovld __cnfn powr(float2 x, float2 y); +float3 __ovld __cnfn powr(float3 x, float3 y); +float4 __ovld __cnfn powr(float4 x, float4 y); +float8 __ovld __cnfn powr(float8 x, float8 y); +float16 __ovld __cnfn powr(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn powr(double x, double y); +double2 __ovld __cnfn powr(double2 x, double2 y); +double3 __ovld __cnfn powr(double3 x, double3 y); +double4 __ovld __cnfn powr(double4 x, double4 y); +double8 __ovld __cnfn powr(double8 x, double8 y); +double16 __ovld __cnfn powr(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn powr(half x, half y); +half2 __ovld __cnfn powr(half2 x, half2 y); +half3 __ovld __cnfn powr(half3 x, half3 y); +half4 __ovld __cnfn powr(half4 x, half4 y); +half8 __ovld __cnfn powr(half8 x, half8 y); +half16 __ovld __cnfn powr(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Compute the value r such that r = x - n*y, where n + * is the integer nearest the exact value of x/y. If there + * are two integers closest to x/y, n shall be the even + * one. If r is zero, it is given the same sign as x. 
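+ * For example, remainder(7.5f, 2.0f) is -0.5f: the integer nearest 7.5/2.0 = 3.75 is n = 4, and 7.5 - 4*2.0 = -0.5.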
+ */ +float __ovld __cnfn remainder(float x, float y); +float2 __ovld __cnfn remainder(float2 x, float2 y); +float3 __ovld __cnfn remainder(float3 x, float3 y); +float4 __ovld __cnfn remainder(float4 x, float4 y); +float8 __ovld __cnfn remainder(float8 x, float8 y); +float16 __ovld __cnfn remainder(float16 x, float16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn remainder(double x, double y); +double2 __ovld __cnfn remainder(double2 x, double2 y); +double3 __ovld __cnfn remainder(double3 x, double3 y); +double4 __ovld __cnfn remainder(double4 x, double4 y); +double8 __ovld __cnfn remainder(double8 x, double8 y); +double16 __ovld __cnfn remainder(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn remainder(half x, half y); +half2 __ovld __cnfn remainder(half2 x, half2 y); +half3 __ovld __cnfn remainder(half3 x, half3 y); +half4 __ovld __cnfn remainder(half4 x, half4 y); +half8 __ovld __cnfn remainder(half8 x, half8 y); +half16 __ovld __cnfn remainder(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * The remquo function computes the value r such + * that r = x - n*y, where n is the integer nearest the + * exact value of x/y. If there are two integers closest + * to x/y, n shall be the even one. If r is zero, it is + * given the same sign as x. This is the same value + * that is returned by the remainder function. + * remquo also calculates the lower seven bits of the + * integral quotient x/y, and gives that value the same + * sign as x/y. It stores this signed value in the object + * pointed to by quo. + */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float __ovld remquo(float x, float y, int *quo); +float2 __ovld remquo(float2 x, float2 y, int2 *quo); +float3 __ovld remquo(float3 x, float3 y, int3 *quo); +float4 __ovld remquo(float4 x, float4 y, int4 *quo); +float8 __ovld remquo(float8 x, float8 y, int8 *quo); +float16 __ovld remquo(float16 x, float16 y, int16 *quo); +#ifdef cl_khr_fp64 +double __ovld remquo(double x, double y, int *quo); +double2 __ovld remquo(double2 x, double2 y, int2 *quo); +double3 __ovld remquo(double3 x, double3 y, int3 *quo); +double4 __ovld remquo(double4 x, double4 y, int4 *quo); +double8 __ovld remquo(double8 x, double8 y, int8 *quo); +double16 __ovld remquo(double16 x, double16 y, int16 *quo); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld remquo(half x, half y, int *quo); +half2 __ovld remquo(half2 x, half2 y, int2 *quo); +half3 __ovld remquo(half3 x, half3 y, int3 *quo); +half4 __ovld remquo(half4 x, half4 y, int4 *quo); +half8 __ovld remquo(half8 x, half8 y, int8 *quo); +half16 __ovld remquo(half16 x, half16 y, int16 *quo); + +#endif //cl_khr_fp16 +#else +float __ovld remquo(float x, float y, __global int *quo); +float2 __ovld remquo(float2 x, float2 y, __global int2 *quo); +float3 __ovld remquo(float3 x, float3 y, __global int3 *quo); +float4 __ovld remquo(float4 x, float4 y, __global int4 *quo); +float8 __ovld remquo(float8 x, float8 y, __global int8 *quo); +float16 __ovld remquo(float16 x, float16 y, __global int16 *quo); +float __ovld remquo(float x, float y, __local int *quo); +float2 __ovld remquo(float2 x, float2 y, __local int2 *quo); +float3 __ovld remquo(float3 x, float3 y, __local int3 *quo); +float4 __ovld remquo(float4 x, float4 y, __local int4 *quo); +float8 __ovld remquo(float8 x, float8 y, __local int8 *quo); +float16 __ovld remquo(float16 x, float16 y, __local int16 *quo); +float __ovld remquo(float x, float y, __private int *quo); +float2 __ovld remquo(float2 x, float2 y, __private int2 *quo); 
+float3 __ovld remquo(float3 x, float3 y, __private int3 *quo); +float4 __ovld remquo(float4 x, float4 y, __private int4 *quo); +float8 __ovld remquo(float8 x, float8 y, __private int8 *quo); +float16 __ovld remquo(float16 x, float16 y, __private int16 *quo); +#ifdef cl_khr_fp64 +double __ovld remquo(double x, double y, __global int *quo); +double2 __ovld remquo(double2 x, double2 y, __global int2 *quo); +double3 __ovld remquo(double3 x, double3 y, __global int3 *quo); +double4 __ovld remquo(double4 x, double4 y, __global int4 *quo); +double8 __ovld remquo(double8 x, double8 y, __global int8 *quo); +double16 __ovld remquo(double16 x, double16 y, __global int16 *quo); +double __ovld remquo(double x, double y, __local int *quo); +double2 __ovld remquo(double2 x, double2 y, __local int2 *quo); +double3 __ovld remquo(double3 x, double3 y, __local int3 *quo); +double4 __ovld remquo(double4 x, double4 y, __local int4 *quo); +double8 __ovld remquo(double8 x, double8 y, __local int8 *quo); +double16 __ovld remquo(double16 x, double16 y, __local int16 *quo); +double __ovld remquo(double x, double y, __private int *quo); +double2 __ovld remquo(double2 x, double2 y, __private int2 *quo); +double3 __ovld remquo(double3 x, double3 y, __private int3 *quo); +double4 __ovld remquo(double4 x, double4 y, __private int4 *quo); +double8 __ovld remquo(double8 x, double8 y, __private int8 *quo); +double16 __ovld remquo(double16 x, double16 y, __private int16 *quo); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld remquo(half x, half y, __global int *quo); +half2 __ovld remquo(half2 x, half2 y, __global int2 *quo); +half3 __ovld remquo(half3 x, half3 y, __global int3 *quo); +half4 __ovld remquo(half4 x, half4 y, __global int4 *quo); +half8 __ovld remquo(half8 x, half8 y, __global int8 *quo); +half16 __ovld remquo(half16 x, half16 y, __global int16 *quo); +half __ovld remquo(half x, half y, __local int *quo); +half2 __ovld remquo(half2 x, half2 y, __local int2 *quo); +half3 __ovld remquo(half3 x, half3 y, __local int3 *quo); +half4 __ovld remquo(half4 x, half4 y, __local int4 *quo); +half8 __ovld remquo(half8 x, half8 y, __local int8 *quo); +half16 __ovld remquo(half16 x, half16 y, __local int16 *quo); +half __ovld remquo(half x, half y, __private int *quo); +half2 __ovld remquo(half2 x, half2 y, __private int2 *quo); +half3 __ovld remquo(half3 x, half3 y, __private int3 *quo); +half4 __ovld remquo(half4 x, half4 y, __private int4 *quo); +half8 __ovld remquo(half8 x, half8 y, __private int8 *quo); +half16 __ovld remquo(half16 x, half16 y, __private int16 *quo); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 +/** + * Round to integral value (using round to nearest + * even rounding mode) in floating-point format. + * Refer to section 7.1 for description of rounding + * modes. 
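+ * For example, rint(2.5f) is 2.0f and rint(3.5f) is 4.0f, since halfway cases round to the nearest even integer.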
+ */ +float __ovld __cnfn rint(float); +float2 __ovld __cnfn rint(float2); +float3 __ovld __cnfn rint(float3); +float4 __ovld __cnfn rint(float4); +float8 __ovld __cnfn rint(float8); +float16 __ovld __cnfn rint(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn rint(double); +double2 __ovld __cnfn rint(double2); +double3 __ovld __cnfn rint(double3); +double4 __ovld __cnfn rint(double4); +double8 __ovld __cnfn rint(double8); +double16 __ovld __cnfn rint(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn rint(half); +half2 __ovld __cnfn rint(half2); +half3 __ovld __cnfn rint(half3); +half4 __ovld __cnfn rint(half4); +half8 __ovld __cnfn rint(half8); +half16 __ovld __cnfn rint(half16); +#endif //cl_khr_fp16 + +/** + * Compute x to the power 1/y. + */ +float __ovld __cnfn rootn(float x, int y); +float2 __ovld __cnfn rootn(float2 x, int2 y); +float3 __ovld __cnfn rootn(float3 x, int3 y); +float4 __ovld __cnfn rootn(float4 x, int4 y); +float8 __ovld __cnfn rootn(float8 x, int8 y); +float16 __ovld __cnfn rootn(float16 x, int16 y); +#ifdef cl_khr_fp64 +double __ovld __cnfn rootn(double x, int y); +double2 __ovld __cnfn rootn(double2 x, int2 y); +double3 __ovld __cnfn rootn(double3 x, int3 y); +double4 __ovld __cnfn rootn(double4 x, int4 y); +double8 __ovld __cnfn rootn(double8 x, int8 y); +double16 __ovld __cnfn rootn(double16 x, int16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn rootn(half x, int y); +half2 __ovld __cnfn rootn(half2 x, int2 y); +half3 __ovld __cnfn rootn(half3 x, int3 y); +half4 __ovld __cnfn rootn(half4 x, int4 y); +half8 __ovld __cnfn rootn(half8 x, int8 y); +half16 __ovld __cnfn rootn(half16 x, int16 y); +#endif //cl_khr_fp16 + +/** + * Return the integral value nearest to x rounding + * halfway cases away from zero, regardless of the + * current rounding direction. + */ +float __ovld __cnfn round(float x); +float2 __ovld __cnfn round(float2 x); +float3 __ovld __cnfn round(float3 x); +float4 __ovld __cnfn round(float4 x); +float8 __ovld __cnfn round(float8 x); +float16 __ovld __cnfn round(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn round(double x); +double2 __ovld __cnfn round(double2 x); +double3 __ovld __cnfn round(double3 x); +double4 __ovld __cnfn round(double4 x); +double8 __ovld __cnfn round(double8 x); +double16 __ovld __cnfn round(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn round(half x); +half2 __ovld __cnfn round(half2 x); +half3 __ovld __cnfn round(half3 x); +half4 __ovld __cnfn round(half4 x); +half8 __ovld __cnfn round(half8 x); +half16 __ovld __cnfn round(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute inverse square root. + */ +float __ovld __cnfn rsqrt(float); +float2 __ovld __cnfn rsqrt(float2); +float3 __ovld __cnfn rsqrt(float3); +float4 __ovld __cnfn rsqrt(float4); +float8 __ovld __cnfn rsqrt(float8); +float16 __ovld __cnfn rsqrt(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn rsqrt(double); +double2 __ovld __cnfn rsqrt(double2); +double3 __ovld __cnfn rsqrt(double3); +double4 __ovld __cnfn rsqrt(double4); +double8 __ovld __cnfn rsqrt(double8); +double16 __ovld __cnfn rsqrt(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn rsqrt(half); +half2 __ovld __cnfn rsqrt(half2); +half3 __ovld __cnfn rsqrt(half3); +half4 __ovld __cnfn rsqrt(half4); +half8 __ovld __cnfn rsqrt(half8); +half16 __ovld __cnfn rsqrt(half16); +#endif //cl_khr_fp16 + +/** + * Compute sine. 
+ */ +float __ovld __cnfn sin(float); +float2 __ovld __cnfn sin(float2); +float3 __ovld __cnfn sin(float3); +float4 __ovld __cnfn sin(float4); +float8 __ovld __cnfn sin(float8); +float16 __ovld __cnfn sin(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn sin(double); +double2 __ovld __cnfn sin(double2); +double3 __ovld __cnfn sin(double3); +double4 __ovld __cnfn sin(double4); +double8 __ovld __cnfn sin(double8); +double16 __ovld __cnfn sin(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sin(half); +half2 __ovld __cnfn sin(half2); +half3 __ovld __cnfn sin(half3); +half4 __ovld __cnfn sin(half4); +half8 __ovld __cnfn sin(half8); +half16 __ovld __cnfn sin(half16); +#endif //cl_khr_fp16 + +/** + * Compute sine and cosine of x. The computed sine + * is the return value and computed cosine is returned + * in cosval. + */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float __ovld sincos(float x, float *cosval); +float2 __ovld sincos(float2 x, float2 *cosval); +float3 __ovld sincos(float3 x, float3 *cosval); +float4 __ovld sincos(float4 x, float4 *cosval); +float8 __ovld sincos(float8 x, float8 *cosval); +float16 __ovld sincos(float16 x, float16 *cosval); +#ifdef cl_khr_fp64 +double __ovld sincos(double x, double *cosval); +double2 __ovld sincos(double2 x, double2 *cosval); +double3 __ovld sincos(double3 x, double3 *cosval); +double4 __ovld sincos(double4 x, double4 *cosval); +double8 __ovld sincos(double8 x, double8 *cosval); +double16 __ovld sincos(double16 x, double16 *cosval); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld sincos(half x, half *cosval); +half2 __ovld sincos(half2 x, half2 *cosval); +half3 __ovld sincos(half3 x, half3 *cosval); +half4 __ovld sincos(half4 x, half4 *cosval); +half8 __ovld sincos(half8 x, half8 *cosval); +half16 __ovld sincos(half16 x, half16 *cosval); +#endif //cl_khr_fp16 +#else +float __ovld sincos(float x, __global float *cosval); +float2 __ovld sincos(float2 x, __global float2 *cosval); +float3 __ovld sincos(float3 x, __global float3 *cosval); +float4 __ovld sincos(float4 x, __global float4 *cosval); +float8 __ovld sincos(float8 x, __global float8 *cosval); +float16 __ovld sincos(float16 x, __global float16 *cosval); +float __ovld sincos(float x, __local float *cosval); +float2 __ovld sincos(float2 x, __local float2 *cosval); +float3 __ovld sincos(float3 x, __local float3 *cosval); +float4 __ovld sincos(float4 x, __local float4 *cosval); +float8 __ovld sincos(float8 x, __local float8 *cosval); +float16 __ovld sincos(float16 x, __local float16 *cosval); +float __ovld sincos(float x, __private float *cosval); +float2 __ovld sincos(float2 x, __private float2 *cosval); +float3 __ovld sincos(float3 x, __private float3 *cosval); +float4 __ovld sincos(float4 x, __private float4 *cosval); +float8 __ovld sincos(float8 x, __private float8 *cosval); +float16 __ovld sincos(float16 x, __private float16 *cosval); +#ifdef cl_khr_fp64 +double __ovld sincos(double x, __global double *cosval); +double2 __ovld sincos(double2 x, __global double2 *cosval); +double3 __ovld sincos(double3 x, __global double3 *cosval); +double4 __ovld sincos(double4 x, __global double4 *cosval); +double8 __ovld sincos(double8 x, __global double8 *cosval); +double16 __ovld sincos(double16 x, __global double16 *cosval); +double __ovld sincos(double x, __local double *cosval); +double2 __ovld sincos(double2 x, __local double2 *cosval); +double3 __ovld sincos(double3 x, __local double3 *cosval); +double4 __ovld sincos(double4 x, __local double4 *cosval); +double8 __ovld 
sincos(double8 x, __local double8 *cosval); +double16 __ovld sincos(double16 x, __local double16 *cosval); +double __ovld sincos(double x, __private double *cosval); +double2 __ovld sincos(double2 x, __private double2 *cosval); +double3 __ovld sincos(double3 x, __private double3 *cosval); +double4 __ovld sincos(double4 x, __private double4 *cosval); +double8 __ovld sincos(double8 x, __private double8 *cosval); +double16 __ovld sincos(double16 x, __private double16 *cosval); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld sincos(half x, __global half *cosval); +half2 __ovld sincos(half2 x, __global half2 *cosval); +half3 __ovld sincos(half3 x, __global half3 *cosval); +half4 __ovld sincos(half4 x, __global half4 *cosval); +half8 __ovld sincos(half8 x, __global half8 *cosval); +half16 __ovld sincos(half16 x, __global half16 *cosval); +half __ovld sincos(half x, __local half *cosval); +half2 __ovld sincos(half2 x, __local half2 *cosval); +half3 __ovld sincos(half3 x, __local half3 *cosval); +half4 __ovld sincos(half4 x, __local half4 *cosval); +half8 __ovld sincos(half8 x, __local half8 *cosval); +half16 __ovld sincos(half16 x, __local half16 *cosval); +half __ovld sincos(half x, __private half *cosval); +half2 __ovld sincos(half2 x, __private half2 *cosval); +half3 __ovld sincos(half3 x, __private half3 *cosval); +half4 __ovld sincos(half4 x, __private half4 *cosval); +half8 __ovld sincos(half8 x, __private half8 *cosval); +half16 __ovld sincos(half16 x, __private half16 *cosval); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Compute hyperbolic sine. + */ +float __ovld __cnfn sinh(float); +float2 __ovld __cnfn sinh(float2); +float3 __ovld __cnfn sinh(float3); +float4 __ovld __cnfn sinh(float4); +float8 __ovld __cnfn sinh(float8); +float16 __ovld __cnfn sinh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn sinh(double); +double2 __ovld __cnfn sinh(double2); +double3 __ovld __cnfn sinh(double3); +double4 __ovld __cnfn sinh(double4); +double8 __ovld __cnfn sinh(double8); +double16 __ovld __cnfn sinh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sinh(half); +half2 __ovld __cnfn sinh(half2); +half3 __ovld __cnfn sinh(half3); +half4 __ovld __cnfn sinh(half4); +half8 __ovld __cnfn sinh(half8); +half16 __ovld __cnfn sinh(half16); +#endif //cl_khr_fp16 + +/** + * Compute sin (PI * x). + */ +float __ovld __cnfn sinpi(float x); +float2 __ovld __cnfn sinpi(float2 x); +float3 __ovld __cnfn sinpi(float3 x); +float4 __ovld __cnfn sinpi(float4 x); +float8 __ovld __cnfn sinpi(float8 x); +float16 __ovld __cnfn sinpi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn sinpi(double x); +double2 __ovld __cnfn sinpi(double2 x); +double3 __ovld __cnfn sinpi(double3 x); +double4 __ovld __cnfn sinpi(double4 x); +double8 __ovld __cnfn sinpi(double8 x); +double16 __ovld __cnfn sinpi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sinpi(half x); +half2 __ovld __cnfn sinpi(half2 x); +half3 __ovld __cnfn sinpi(half3 x); +half4 __ovld __cnfn sinpi(half4 x); +half8 __ovld __cnfn sinpi(half8 x); +half16 __ovld __cnfn sinpi(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute square root. 
+ */ +float __ovld __cnfn sqrt(float); +float2 __ovld __cnfn sqrt(float2); +float3 __ovld __cnfn sqrt(float3); +float4 __ovld __cnfn sqrt(float4); +float8 __ovld __cnfn sqrt(float8); +float16 __ovld __cnfn sqrt(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn sqrt(double); +double2 __ovld __cnfn sqrt(double2); +double3 __ovld __cnfn sqrt(double3); +double4 __ovld __cnfn sqrt(double4); +double8 __ovld __cnfn sqrt(double8); +double16 __ovld __cnfn sqrt(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sqrt(half); +half2 __ovld __cnfn sqrt(half2); +half3 __ovld __cnfn sqrt(half3); +half4 __ovld __cnfn sqrt(half4); +half8 __ovld __cnfn sqrt(half8); +half16 __ovld __cnfn sqrt(half16); +#endif //cl_khr_fp16 + +/** + * Compute tangent. + */ +float __ovld __cnfn tan(float); +float2 __ovld __cnfn tan(float2); +float3 __ovld __cnfn tan(float3); +float4 __ovld __cnfn tan(float4); +float8 __ovld __cnfn tan(float8); +float16 __ovld __cnfn tan(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn tan(double); +double2 __ovld __cnfn tan(double2); +double3 __ovld __cnfn tan(double3); +double4 __ovld __cnfn tan(double4); +double8 __ovld __cnfn tan(double8); +double16 __ovld __cnfn tan(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn tan(half); +half2 __ovld __cnfn tan(half2); +half3 __ovld __cnfn tan(half3); +half4 __ovld __cnfn tan(half4); +half8 __ovld __cnfn tan(half8); +half16 __ovld __cnfn tan(half16); +#endif //cl_khr_fp16 + +/** + * Compute hyperbolic tangent. + */ +float __ovld __cnfn tanh(float); +float2 __ovld __cnfn tanh(float2); +float3 __ovld __cnfn tanh(float3); +float4 __ovld __cnfn tanh(float4); +float8 __ovld __cnfn tanh(float8); +float16 __ovld __cnfn tanh(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn tanh(double); +double2 __ovld __cnfn tanh(double2); +double3 __ovld __cnfn tanh(double3); +double4 __ovld __cnfn tanh(double4); +double8 __ovld __cnfn tanh(double8); +double16 __ovld __cnfn tanh(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn tanh(half); +half2 __ovld __cnfn tanh(half2); +half3 __ovld __cnfn tanh(half3); +half4 __ovld __cnfn tanh(half4); +half8 __ovld __cnfn tanh(half8); +half16 __ovld __cnfn tanh(half16); +#endif //cl_khr_fp16 + +/** + * Compute tan (PI * x). + */ +float __ovld __cnfn tanpi(float x); +float2 __ovld __cnfn tanpi(float2 x); +float3 __ovld __cnfn tanpi(float3 x); +float4 __ovld __cnfn tanpi(float4 x); +float8 __ovld __cnfn tanpi(float8 x); +float16 __ovld __cnfn tanpi(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn tanpi(double x); +double2 __ovld __cnfn tanpi(double2 x); +double3 __ovld __cnfn tanpi(double3 x); +double4 __ovld __cnfn tanpi(double4 x); +double8 __ovld __cnfn tanpi(double8 x); +double16 __ovld __cnfn tanpi(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn tanpi(half x); +half2 __ovld __cnfn tanpi(half2 x); +half3 __ovld __cnfn tanpi(half3 x); +half4 __ovld __cnfn tanpi(half4 x); +half8 __ovld __cnfn tanpi(half8 x); +half16 __ovld __cnfn tanpi(half16 x); +#endif //cl_khr_fp16 + +/** + * Compute the gamma function. 
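+ * For example, tgamma(5.0f) is 24.0f, i.e. 4 factorial.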
+ */ +float __ovld __cnfn tgamma(float); +float2 __ovld __cnfn tgamma(float2); +float3 __ovld __cnfn tgamma(float3); +float4 __ovld __cnfn tgamma(float4); +float8 __ovld __cnfn tgamma(float8); +float16 __ovld __cnfn tgamma(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn tgamma(double); +double2 __ovld __cnfn tgamma(double2); +double3 __ovld __cnfn tgamma(double3); +double4 __ovld __cnfn tgamma(double4); +double8 __ovld __cnfn tgamma(double8); +double16 __ovld __cnfn tgamma(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn tgamma(half); +half2 __ovld __cnfn tgamma(half2); +half3 __ovld __cnfn tgamma(half3); +half4 __ovld __cnfn tgamma(half4); +half8 __ovld __cnfn tgamma(half8); +half16 __ovld __cnfn tgamma(half16); +#endif //cl_khr_fp16 + +/** + * Round to integral value using the round to zero + * rounding mode. + */ +float __ovld __cnfn trunc(float); +float2 __ovld __cnfn trunc(float2); +float3 __ovld __cnfn trunc(float3); +float4 __ovld __cnfn trunc(float4); +float8 __ovld __cnfn trunc(float8); +float16 __ovld __cnfn trunc(float16); +#ifdef cl_khr_fp64 +double __ovld __cnfn trunc(double); +double2 __ovld __cnfn trunc(double2); +double3 __ovld __cnfn trunc(double3); +double4 __ovld __cnfn trunc(double4); +double8 __ovld __cnfn trunc(double8); +double16 __ovld __cnfn trunc(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn trunc(half); +half2 __ovld __cnfn trunc(half2); +half3 __ovld __cnfn trunc(half3); +half4 __ovld __cnfn trunc(half4); +half8 __ovld __cnfn trunc(half8); +half16 __ovld __cnfn trunc(half16); +#endif //cl_khr_fp16 + +/** + * Compute cosine. x must be in the range -2^16 ... +2^16. + */ +float __ovld __cnfn half_cos(float x); +float2 __ovld __cnfn half_cos(float2 x); +float3 __ovld __cnfn half_cos(float3 x); +float4 __ovld __cnfn half_cos(float4 x); +float8 __ovld __cnfn half_cos(float8 x); +float16 __ovld __cnfn half_cos(float16 x); + +/** + * Compute x / y. + */ +float __ovld __cnfn half_divide(float x, float y); +float2 __ovld __cnfn half_divide(float2 x, float2 y); +float3 __ovld __cnfn half_divide(float3 x, float3 y); +float4 __ovld __cnfn half_divide(float4 x, float4 y); +float8 __ovld __cnfn half_divide(float8 x, float8 y); +float16 __ovld __cnfn half_divide(float16 x, float16 y); + +/** + * Compute the base- e exponential of x. + */ +float __ovld __cnfn half_exp(float x); +float2 __ovld __cnfn half_exp(float2 x); +float3 __ovld __cnfn half_exp(float3 x); +float4 __ovld __cnfn half_exp(float4 x); +float8 __ovld __cnfn half_exp(float8 x); +float16 __ovld __cnfn half_exp(float16 x); + +/** + * Compute the base- 2 exponential of x. + */ +float __ovld __cnfn half_exp2(float x); +float2 __ovld __cnfn half_exp2(float2 x); +float3 __ovld __cnfn half_exp2(float3 x); +float4 __ovld __cnfn half_exp2(float4 x); +float8 __ovld __cnfn half_exp2(float8 x); +float16 __ovld __cnfn half_exp2(float16 x); + +/** + * Compute the base- 10 exponential of x. + */ +float __ovld __cnfn half_exp10(float x); +float2 __ovld __cnfn half_exp10(float2 x); +float3 __ovld __cnfn half_exp10(float3 x); +float4 __ovld __cnfn half_exp10(float4 x); +float8 __ovld __cnfn half_exp10(float8 x); +float16 __ovld __cnfn half_exp10(float16 x); + +/** + * Compute natural logarithm. 
+ */ +float __ovld __cnfn half_log(float x); +float2 __ovld __cnfn half_log(float2 x); +float3 __ovld __cnfn half_log(float3 x); +float4 __ovld __cnfn half_log(float4 x); +float8 __ovld __cnfn half_log(float8 x); +float16 __ovld __cnfn half_log(float16 x); + +/** + * Compute a base 2 logarithm. + */ +float __ovld __cnfn half_log2(float x); +float2 __ovld __cnfn half_log2(float2 x); +float3 __ovld __cnfn half_log2(float3 x); +float4 __ovld __cnfn half_log2(float4 x); +float8 __ovld __cnfn half_log2(float8 x); +float16 __ovld __cnfn half_log2(float16 x); + +/** + * Compute a base 10 logarithm. + */ +float __ovld __cnfn half_log10(float x); +float2 __ovld __cnfn half_log10(float2 x); +float3 __ovld __cnfn half_log10(float3 x); +float4 __ovld __cnfn half_log10(float4 x); +float8 __ovld __cnfn half_log10(float8 x); +float16 __ovld __cnfn half_log10(float16 x); + +/** + * Compute x to the power y, where x is >= 0. + */ +float __ovld __cnfn half_powr(float x, float y); +float2 __ovld __cnfn half_powr(float2 x, float2 y); +float3 __ovld __cnfn half_powr(float3 x, float3 y); +float4 __ovld __cnfn half_powr(float4 x, float4 y); +float8 __ovld __cnfn half_powr(float8 x, float8 y); +float16 __ovld __cnfn half_powr(float16 x, float16 y); + +/** + * Compute reciprocal. + */ +float __ovld __cnfn half_recip(float x); +float2 __ovld __cnfn half_recip(float2 x); +float3 __ovld __cnfn half_recip(float3 x); +float4 __ovld __cnfn half_recip(float4 x); +float8 __ovld __cnfn half_recip(float8 x); +float16 __ovld __cnfn half_recip(float16 x); + +/** + * Compute inverse square root. + */ +float __ovld __cnfn half_rsqrt(float x); +float2 __ovld __cnfn half_rsqrt(float2 x); +float3 __ovld __cnfn half_rsqrt(float3 x); +float4 __ovld __cnfn half_rsqrt(float4 x); +float8 __ovld __cnfn half_rsqrt(float8 x); +float16 __ovld __cnfn half_rsqrt(float16 x); + +/** + * Compute sine. x must be in the range -2^16 ... +2^16. + */ +float __ovld __cnfn half_sin(float x); +float2 __ovld __cnfn half_sin(float2 x); +float3 __ovld __cnfn half_sin(float3 x); +float4 __ovld __cnfn half_sin(float4 x); +float8 __ovld __cnfn half_sin(float8 x); +float16 __ovld __cnfn half_sin(float16 x); + +/** + * Compute square root. + */ +float __ovld __cnfn half_sqrt(float x); +float2 __ovld __cnfn half_sqrt(float2 x); +float3 __ovld __cnfn half_sqrt(float3 x); +float4 __ovld __cnfn half_sqrt(float4 x); +float8 __ovld __cnfn half_sqrt(float8 x); +float16 __ovld __cnfn half_sqrt(float16 x); + +/** + * Compute tangent. x must be in the range -2^16 ... +2^16. + */ +float __ovld __cnfn half_tan(float x); +float2 __ovld __cnfn half_tan(float2 x); +float3 __ovld __cnfn half_tan(float3 x); +float4 __ovld __cnfn half_tan(float4 x); +float8 __ovld __cnfn half_tan(float8 x); +float16 __ovld __cnfn half_tan(float16 x); + +/** + * Compute cosine over an implementation-defined range. + * The maximum error is implementation-defined. + */ +float __ovld __cnfn native_cos(float x); +float2 __ovld __cnfn native_cos(float2 x); +float3 __ovld __cnfn native_cos(float3 x); +float4 __ovld __cnfn native_cos(float4 x); +float8 __ovld __cnfn native_cos(float8 x); +float16 __ovld __cnfn native_cos(float16 x); + +/** + * Compute x / y over an implementation-defined range. + * The maximum error is implementation-defined.
+ */ +float __ovld __cnfn native_divide(float x, float y); +float2 __ovld __cnfn native_divide(float2 x, float2 y); +float3 __ovld __cnfn native_divide(float3 x, float3 y); +float4 __ovld __cnfn native_divide(float4 x, float4 y); +float8 __ovld __cnfn native_divide(float8 x, float8 y); +float16 __ovld __cnfn native_divide(float16 x, float16 y); + +/** + * Compute the base- e exponential of x over an + * implementation-defined range. The maximum error is + * implementation-defined. + */ +float __ovld __cnfn native_exp(float x); +float2 __ovld __cnfn native_exp(float2 x); +float3 __ovld __cnfn native_exp(float3 x); +float4 __ovld __cnfn native_exp(float4 x); +float8 __ovld __cnfn native_exp(float8 x); +float16 __ovld __cnfn native_exp(float16 x); + +/** + * Compute the base- 2 exponential of x over an + * implementation-defined range. The maximum error is + * implementation-defined. + */ +float __ovld __cnfn native_exp2(float x); +float2 __ovld __cnfn native_exp2(float2 x); +float3 __ovld __cnfn native_exp2(float3 x); +float4 __ovld __cnfn native_exp2(float4 x); +float8 __ovld __cnfn native_exp2(float8 x); +float16 __ovld __cnfn native_exp2(float16 x); + +/** + * Compute the base- 10 exponential of x over an + * implementation-defined range. The maximum error is + * implementation-defined. + */ +float __ovld __cnfn native_exp10(float x); +float2 __ovld __cnfn native_exp10(float2 x); +float3 __ovld __cnfn native_exp10(float3 x); +float4 __ovld __cnfn native_exp10(float4 x); +float8 __ovld __cnfn native_exp10(float8 x); +float16 __ovld __cnfn native_exp10(float16 x); + +/** + * Compute natural logarithm over an implementation-defined + * range. The maximum error is + * implementation-defined. + */ +float __ovld __cnfn native_log(float x); +float2 __ovld __cnfn native_log(float2 x); +float3 __ovld __cnfn native_log(float3 x); +float4 __ovld __cnfn native_log(float4 x); +float8 __ovld __cnfn native_log(float8 x); +float16 __ovld __cnfn native_log(float16 x); + +/** + * Compute a base 2 logarithm over an implementation-defined + * range. The maximum error is implementation-defined. + */ +float __ovld __cnfn native_log2(float x); +float2 __ovld __cnfn native_log2(float2 x); +float3 __ovld __cnfn native_log2(float3 x); +float4 __ovld __cnfn native_log2(float4 x); +float8 __ovld __cnfn native_log2(float8 x); +float16 __ovld __cnfn native_log2(float16 x); + +/** + * Compute a base 10 logarithm over an implementation-defined + * range. The maximum error is implementation-defined. + */ +float __ovld __cnfn native_log10(float x); +float2 __ovld __cnfn native_log10(float2 x); +float3 __ovld __cnfn native_log10(float3 x); +float4 __ovld __cnfn native_log10(float4 x); +float8 __ovld __cnfn native_log10(float8 x); +float16 __ovld __cnfn native_log10(float16 x); + +/** + * Compute x to the power y, where x is >= 0. The range of + * x and y are implementation-defined. The maximum error + * is implementation-defined. + */ +float __ovld __cnfn native_powr(float x, float y); +float2 __ovld __cnfn native_powr(float2 x, float2 y); +float3 __ovld __cnfn native_powr(float3 x, float3 y); +float4 __ovld __cnfn native_powr(float4 x, float4 y); +float8 __ovld __cnfn native_powr(float8 x, float8 y); +float16 __ovld __cnfn native_powr(float16 x, float16 y); + +/** + * Compute reciprocal over an implementation-defined + * range. The maximum error is implementation-defined.
+ */ +float __ovld __cnfn native_recip(float x); +float2 __ovld __cnfn native_recip(float2 x); +float3 __ovld __cnfn native_recip(float3 x); +float4 __ovld __cnfn native_recip(float4 x); +float8 __ovld __cnfn native_recip(float8 x); +float16 __ovld __cnfn native_recip(float16 x); + +/** + * Compute inverse square root over an implementation-defined + * range. The maximum error is implementation-defined. + */ +float __ovld __cnfn native_rsqrt(float x); +float2 __ovld __cnfn native_rsqrt(float2 x); +float3 __ovld __cnfn native_rsqrt(float3 x); +float4 __ovld __cnfn native_rsqrt(float4 x); +float8 __ovld __cnfn native_rsqrt(float8 x); +float16 __ovld __cnfn native_rsqrt(float16 x); + +/** + * Compute sine over an implementation-defined range. + * The maximum error is implementation-defined. + */ +float __ovld __cnfn native_sin(float x); +float2 __ovld __cnfn native_sin(float2 x); +float3 __ovld __cnfn native_sin(float3 x); +float4 __ovld __cnfn native_sin(float4 x); +float8 __ovld __cnfn native_sin(float8 x); +float16 __ovld __cnfn native_sin(float16 x); + +/** + * Compute square root over an implementation-defined + * range. The maximum error is implementation-defined. + */ +float __ovld __cnfn native_sqrt(float x); +float2 __ovld __cnfn native_sqrt(float2 x); +float3 __ovld __cnfn native_sqrt(float3 x); +float4 __ovld __cnfn native_sqrt(float4 x); +float8 __ovld __cnfn native_sqrt(float8 x); +float16 __ovld __cnfn native_sqrt(float16 x); + +/** + * Compute tangent over an implementation-defined range. + * The maximum error is implementation-defined. + */ +float __ovld __cnfn native_tan(float x); +float2 __ovld __cnfn native_tan(float2 x); +float3 __ovld __cnfn native_tan(float3 x); +float4 __ovld __cnfn native_tan(float4 x); +float8 __ovld __cnfn native_tan(float8 x); +float16 __ovld __cnfn native_tan(float16 x); + +// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions + +/** + * Returns | x |.
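+ * The result type is unsigned, so e.g. abs((char)-128) is well defined and returns 128.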
+ */ +uchar __ovld __cnfn abs(char x); +uchar __ovld __cnfn abs(uchar x); +uchar2 __ovld __cnfn abs(char2 x); +uchar2 __ovld __cnfn abs(uchar2 x); +uchar3 __ovld __cnfn abs(char3 x); +uchar3 __ovld __cnfn abs(uchar3 x); +uchar4 __ovld __cnfn abs(char4 x); +uchar4 __ovld __cnfn abs(uchar4 x); +uchar8 __ovld __cnfn abs(char8 x); +uchar8 __ovld __cnfn abs(uchar8 x); +uchar16 __ovld __cnfn abs(char16 x); +uchar16 __ovld __cnfn abs(uchar16 x); +ushort __ovld __cnfn abs(short x); +ushort __ovld __cnfn abs(ushort x); +ushort2 __ovld __cnfn abs(short2 x); +ushort2 __ovld __cnfn abs(ushort2 x); +ushort3 __ovld __cnfn abs(short3 x); +ushort3 __ovld __cnfn abs(ushort3 x); +ushort4 __ovld __cnfn abs(short4 x); +ushort4 __ovld __cnfn abs(ushort4 x); +ushort8 __ovld __cnfn abs(short8 x); +ushort8 __ovld __cnfn abs(ushort8 x); +ushort16 __ovld __cnfn abs(short16 x); +ushort16 __ovld __cnfn abs(ushort16 x); +uint __ovld __cnfn abs(int x); +uint __ovld __cnfn abs(uint x); +uint2 __ovld __cnfn abs(int2 x); +uint2 __ovld __cnfn abs(uint2 x); +uint3 __ovld __cnfn abs(int3 x); +uint3 __ovld __cnfn abs(uint3 x); +uint4 __ovld __cnfn abs(int4 x); +uint4 __ovld __cnfn abs(uint4 x); +uint8 __ovld __cnfn abs(int8 x); +uint8 __ovld __cnfn abs(uint8 x); +uint16 __ovld __cnfn abs(int16 x); +uint16 __ovld __cnfn abs(uint16 x); +ulong __ovld __cnfn abs(long x); +ulong __ovld __cnfn abs(ulong x); +ulong2 __ovld __cnfn abs(long2 x); +ulong2 __ovld __cnfn abs(ulong2 x); +ulong3 __ovld __cnfn abs(long3 x); +ulong3 __ovld __cnfn abs(ulong3 x); +ulong4 __ovld __cnfn abs(long4 x); +ulong4 __ovld __cnfn abs(ulong4 x); +ulong8 __ovld __cnfn abs(long8 x); +ulong8 __ovld __cnfn abs(ulong8 x); +ulong16 __ovld __cnfn abs(long16 x); +ulong16 __ovld __cnfn abs(ulong16 x); + +/** + * Returns | x - y | without modulo overflow. 
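+ * For example, with uchar arguments abs_diff(10, 250) is 240; the intermediate subtraction does not wrap.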
+ */ +uchar __ovld __cnfn abs_diff(char x, char y); +uchar __ovld __cnfn abs_diff(uchar x, uchar y); +uchar2 __ovld __cnfn abs_diff(char2 x, char2 y); +uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y); +uchar3 __ovld __cnfn abs_diff(char3 x, char3 y); +uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y); +uchar4 __ovld __cnfn abs_diff(char4 x, char4 y); +uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y); +uchar8 __ovld __cnfn abs_diff(char8 x, char8 y); +uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y); +uchar16 __ovld __cnfn abs_diff(char16 x, char16 y); +uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y); +ushort __ovld __cnfn abs_diff(short x, short y); +ushort __ovld __cnfn abs_diff(ushort x, ushort y); +ushort2 __ovld __cnfn abs_diff(short2 x, short2 y); +ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y); +ushort3 __ovld __cnfn abs_diff(short3 x, short3 y); +ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y); +ushort4 __ovld __cnfn abs_diff(short4 x, short4 y); +ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y); +ushort8 __ovld __cnfn abs_diff(short8 x, short8 y); +ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y); +ushort16 __ovld __cnfn abs_diff(short16 x, short16 y); +ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y); +uint __ovld __cnfn abs_diff(int x, int y); +uint __ovld __cnfn abs_diff(uint x, uint y); +uint2 __ovld __cnfn abs_diff(int2 x, int2 y); +uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y); +uint3 __ovld __cnfn abs_diff(int3 x, int3 y); +uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y); +uint4 __ovld __cnfn abs_diff(int4 x, int4 y); +uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y); +uint8 __ovld __cnfn abs_diff(int8 x, int8 y); +uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y); +uint16 __ovld __cnfn abs_diff(int16 x, int16 y); +uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y); +ulong __ovld __cnfn abs_diff(long x, long y); +ulong __ovld __cnfn abs_diff(ulong x, ulong y); +ulong2 __ovld __cnfn abs_diff(long2 x, long2 y); +ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y); +ulong3 __ovld __cnfn abs_diff(long3 x, long3 y); +ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y); +ulong4 __ovld __cnfn abs_diff(long4 x, long4 y); +ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y); +ulong8 __ovld __cnfn abs_diff(long8 x, long8 y); +ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y); +ulong16 __ovld __cnfn abs_diff(long16 x, long16 y); +ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y); + +/** + * Returns x + y and saturates the result. 
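+ * For example, with uchar arguments add_sat(200, 100) is 255 (UCHAR_MAX) rather than the wrapped value 44.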
+ */ +char __ovld __cnfn add_sat(char x, char y); +uchar __ovld __cnfn add_sat(uchar x, uchar y); +char2 __ovld __cnfn add_sat(char2 x, char2 y); +uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y); +char3 __ovld __cnfn add_sat(char3 x, char3 y); +uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y); +char4 __ovld __cnfn add_sat(char4 x, char4 y); +uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y); +char8 __ovld __cnfn add_sat(char8 x, char8 y); +uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y); +char16 __ovld __cnfn add_sat(char16 x, char16 y); +uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y); +short __ovld __cnfn add_sat(short x, short y); +ushort __ovld __cnfn add_sat(ushort x, ushort y); +short2 __ovld __cnfn add_sat(short2 x, short2 y); +ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y); +short3 __ovld __cnfn add_sat(short3 x, short3 y); +ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y); +short4 __ovld __cnfn add_sat(short4 x, short4 y); +ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y); +short8 __ovld __cnfn add_sat(short8 x, short8 y); +ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y); +short16 __ovld __cnfn add_sat(short16 x, short16 y); +ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y); +int __ovld __cnfn add_sat(int x, int y); +uint __ovld __cnfn add_sat(uint x, uint y); +int2 __ovld __cnfn add_sat(int2 x, int2 y); +uint2 __ovld __cnfn add_sat(uint2 x, uint2 y); +int3 __ovld __cnfn add_sat(int3 x, int3 y); +uint3 __ovld __cnfn add_sat(uint3 x, uint3 y); +int4 __ovld __cnfn add_sat(int4 x, int4 y); +uint4 __ovld __cnfn add_sat(uint4 x, uint4 y); +int8 __ovld __cnfn add_sat(int8 x, int8 y); +uint8 __ovld __cnfn add_sat(uint8 x, uint8 y); +int16 __ovld __cnfn add_sat(int16 x, int16 y); +uint16 __ovld __cnfn add_sat(uint16 x, uint16 y); +long __ovld __cnfn add_sat(long x, long y); +ulong __ovld __cnfn add_sat(ulong x, ulong y); +long2 __ovld __cnfn add_sat(long2 x, long2 y); +ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y); +long3 __ovld __cnfn add_sat(long3 x, long3 y); +ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y); +long4 __ovld __cnfn add_sat(long4 x, long4 y); +ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y); +long8 __ovld __cnfn add_sat(long8 x, long8 y); +ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y); +long16 __ovld __cnfn add_sat(long16 x, long16 y); +ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y); + +/** + * Returns (x + y) >> 1. The intermediate sum does + * not modulo overflow. 
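+ * For example, with uchar arguments hadd(200, 100) is 150, even though 200 + 100 exceeds UCHAR_MAX.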
+ */ +char __ovld __cnfn hadd(char x, char y); +uchar __ovld __cnfn hadd(uchar x, uchar y); +char2 __ovld __cnfn hadd(char2 x, char2 y); +uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y); +char3 __ovld __cnfn hadd(char3 x, char3 y); +uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y); +char4 __ovld __cnfn hadd(char4 x, char4 y); +uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y); +char8 __ovld __cnfn hadd(char8 x, char8 y); +uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y); +char16 __ovld __cnfn hadd(char16 x, char16 y); +uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y); +short __ovld __cnfn hadd(short x, short y); +ushort __ovld __cnfn hadd(ushort x, ushort y); +short2 __ovld __cnfn hadd(short2 x, short2 y); +ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y); +short3 __ovld __cnfn hadd(short3 x, short3 y); +ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y); +short4 __ovld __cnfn hadd(short4 x, short4 y); +ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y); +short8 __ovld __cnfn hadd(short8 x, short8 y); +ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y); +short16 __ovld __cnfn hadd(short16 x, short16 y); +ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y); +int __ovld __cnfn hadd(int x, int y); +uint __ovld __cnfn hadd(uint x, uint y); +int2 __ovld __cnfn hadd(int2 x, int2 y); +uint2 __ovld __cnfn hadd(uint2 x, uint2 y); +int3 __ovld __cnfn hadd(int3 x, int3 y); +uint3 __ovld __cnfn hadd(uint3 x, uint3 y); +int4 __ovld __cnfn hadd(int4 x, int4 y); +uint4 __ovld __cnfn hadd(uint4 x, uint4 y); +int8 __ovld __cnfn hadd(int8 x, int8 y); +uint8 __ovld __cnfn hadd(uint8 x, uint8 y); +int16 __ovld __cnfn hadd(int16 x, int16 y); +uint16 __ovld __cnfn hadd(uint16 x, uint16 y); +long __ovld __cnfn hadd(long x, long y); +ulong __ovld __cnfn hadd(ulong x, ulong y); +long2 __ovld __cnfn hadd(long2 x, long2 y); +ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y); +long3 __ovld __cnfn hadd(long3 x, long3 y); +ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y); +long4 __ovld __cnfn hadd(long4 x, long4 y); +ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y); +long8 __ovld __cnfn hadd(long8 x, long8 y); +ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y); +long16 __ovld __cnfn hadd(long16 x, long16 y); +ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y); + +/** + * Returns (x + y + 1) >> 1. The intermediate sum + * does not modulo overflow. 
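+ * For example, rhadd(5, 2) is 4 while hadd(5, 2) is 3; rhadd rounds the halved sum up.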
+ */ +char __ovld __cnfn rhadd(char x, char y); +uchar __ovld __cnfn rhadd(uchar x, uchar y); +char2 __ovld __cnfn rhadd(char2 x, char2 y); +uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y); +char3 __ovld __cnfn rhadd(char3 x, char3 y); +uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y); +char4 __ovld __cnfn rhadd(char4 x, char4 y); +uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y); +char8 __ovld __cnfn rhadd(char8 x, char8 y); +uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y); +char16 __ovld __cnfn rhadd(char16 x, char16 y); +uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y); +short __ovld __cnfn rhadd(short x, short y); +ushort __ovld __cnfn rhadd(ushort x, ushort y); +short2 __ovld __cnfn rhadd(short2 x, short2 y); +ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y); +short3 __ovld __cnfn rhadd(short3 x, short3 y); +ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y); +short4 __ovld __cnfn rhadd(short4 x, short4 y); +ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y); +short8 __ovld __cnfn rhadd(short8 x, short8 y); +ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y); +short16 __ovld __cnfn rhadd(short16 x, short16 y); +ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y); +int __ovld __cnfn rhadd(int x, int y); +uint __ovld __cnfn rhadd(uint x, uint y); +int2 __ovld __cnfn rhadd(int2 x, int2 y); +uint2 __ovld __cnfn rhadd(uint2 x, uint2 y); +int3 __ovld __cnfn rhadd(int3 x, int3 y); +uint3 __ovld __cnfn rhadd(uint3 x, uint3 y); +int4 __ovld __cnfn rhadd(int4 x, int4 y); +uint4 __ovld __cnfn rhadd(uint4 x, uint4 y); +int8 __ovld __cnfn rhadd(int8 x, int8 y); +uint8 __ovld __cnfn rhadd(uint8 x, uint8 y); +int16 __ovld __cnfn rhadd(int16 x, int16 y); +uint16 __ovld __cnfn rhadd(uint16 x, uint16 y); +long __ovld __cnfn rhadd(long x, long y); +ulong __ovld __cnfn rhadd(ulong x, ulong y); +long2 __ovld __cnfn rhadd(long2 x, long2 y); +ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y); +long3 __ovld __cnfn rhadd(long3 x, long3 y); +ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y); +long4 __ovld __cnfn rhadd(long4 x, long4 y); +ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y); +long8 __ovld __cnfn rhadd(long8 x, long8 y); +ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y); +long16 __ovld __cnfn rhadd(long16 x, long16 y); +ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y); + +/** + * Returns min(max(x, minval), maxval). + * Results are undefined if minval > maxval. 
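+ * For example, clamp(300, 0, 255) is 255 and clamp(-7, 0, 255) is 0.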
+ */ +char __ovld __cnfn clamp(char x, char minval, char maxval); +uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval); +char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval); +uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval); +char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval); +uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval); +char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval); +uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval); +char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval); +uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval); +char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval); +uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval); +short __ovld __cnfn clamp(short x, short minval, short maxval); +ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval); +short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval); +ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval); +short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval); +ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval); +short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval); +ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval); +short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval); +ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval); +short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval); +ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval); +int __ovld __cnfn clamp(int x, int minval, int maxval); +uint __ovld __cnfn clamp(uint x, uint minval, uint maxval); +int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval); +uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval); +int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval); +uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval); +int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval); +uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval); +int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval); +uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval); +int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval); +uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval); +long __ovld __cnfn clamp(long x, long minval, long maxval); +ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval); +long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval); +ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval); +long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval); +ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval); +long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval); +ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval); +long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval); +ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval); +long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval); +ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval); +char __ovld __cnfn clamp(char x, char minval, char maxval); +uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval); +char2 __ovld __cnfn clamp(char2 x, char minval, char maxval); +uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval); +char3 __ovld __cnfn clamp(char3 x, char minval, char maxval); +uchar3 __ovld 
__cnfn clamp(uchar3 x, uchar minval, uchar maxval); +char4 __ovld __cnfn clamp(char4 x, char minval, char maxval); +uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval); +char8 __ovld __cnfn clamp(char8 x, char minval, char maxval); +uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval); +char16 __ovld __cnfn clamp(char16 x, char minval, char maxval); +uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval); +short __ovld __cnfn clamp(short x, short minval, short maxval); +ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval); +short2 __ovld __cnfn clamp(short2 x, short minval, short maxval); +ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval); +short3 __ovld __cnfn clamp(short3 x, short minval, short maxval); +ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval); +short4 __ovld __cnfn clamp(short4 x, short minval, short maxval); +ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval); +short8 __ovld __cnfn clamp(short8 x, short minval, short maxval); +ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval); +short16 __ovld __cnfn clamp(short16 x, short minval, short maxval); +ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval); +int __ovld __cnfn clamp(int x, int minval, int maxval); +uint __ovld __cnfn clamp(uint x, uint minval, uint maxval); +int2 __ovld __cnfn clamp(int2 x, int minval, int maxval); +uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval); +int3 __ovld __cnfn clamp(int3 x, int minval, int maxval); +uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval); +int4 __ovld __cnfn clamp(int4 x, int minval, int maxval); +uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval); +int8 __ovld __cnfn clamp(int8 x, int minval, int maxval); +uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval); +int16 __ovld __cnfn clamp(int16 x, int minval, int maxval); +uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval); +long __ovld __cnfn clamp(long x, long minval, long maxval); +ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval); +long2 __ovld __cnfn clamp(long2 x, long minval, long maxval); +ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval); +long3 __ovld __cnfn clamp(long3 x, long minval, long maxval); +ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval); +long4 __ovld __cnfn clamp(long4 x, long minval, long maxval); +ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval); +long8 __ovld __cnfn clamp(long8 x, long minval, long maxval); +ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval); +long16 __ovld __cnfn clamp(long16 x, long minval, long maxval); +ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval); + +/** + * Returns the number of leading 0-bits in x, starting + * at the most significant bit position. 
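+ * For example, clz((uchar)0x0F) is 4 and clz((uint)1) is 31.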
+ */ +char __ovld __cnfn clz(char x); +uchar __ovld __cnfn clz(uchar x); +char2 __ovld __cnfn clz(char2 x); +uchar2 __ovld __cnfn clz(uchar2 x); +char3 __ovld __cnfn clz(char3 x); +uchar3 __ovld __cnfn clz(uchar3 x); +char4 __ovld __cnfn clz(char4 x); +uchar4 __ovld __cnfn clz(uchar4 x); +char8 __ovld __cnfn clz(char8 x); +uchar8 __ovld __cnfn clz(uchar8 x); +char16 __ovld __cnfn clz(char16 x); +uchar16 __ovld __cnfn clz(uchar16 x); +short __ovld __cnfn clz(short x); +ushort __ovld __cnfn clz(ushort x); +short2 __ovld __cnfn clz(short2 x); +ushort2 __ovld __cnfn clz(ushort2 x); +short3 __ovld __cnfn clz(short3 x); +ushort3 __ovld __cnfn clz(ushort3 x); +short4 __ovld __cnfn clz(short4 x); +ushort4 __ovld __cnfn clz(ushort4 x); +short8 __ovld __cnfn clz(short8 x); +ushort8 __ovld __cnfn clz(ushort8 x); +short16 __ovld __cnfn clz(short16 x); +ushort16 __ovld __cnfn clz(ushort16 x); +int __ovld __cnfn clz(int x); +uint __ovld __cnfn clz(uint x); +int2 __ovld __cnfn clz(int2 x); +uint2 __ovld __cnfn clz(uint2 x); +int3 __ovld __cnfn clz(int3 x); +uint3 __ovld __cnfn clz(uint3 x); +int4 __ovld __cnfn clz(int4 x); +uint4 __ovld __cnfn clz(uint4 x); +int8 __ovld __cnfn clz(int8 x); +uint8 __ovld __cnfn clz(uint8 x); +int16 __ovld __cnfn clz(int16 x); +uint16 __ovld __cnfn clz(uint16 x); +long __ovld __cnfn clz(long x); +ulong __ovld __cnfn clz(ulong x); +long2 __ovld __cnfn clz(long2 x); +ulong2 __ovld __cnfn clz(ulong2 x); +long3 __ovld __cnfn clz(long3 x); +ulong3 __ovld __cnfn clz(ulong3 x); +long4 __ovld __cnfn clz(long4 x); +ulong4 __ovld __cnfn clz(ulong4 x); +long8 __ovld __cnfn clz(long8 x); +ulong8 __ovld __cnfn clz(ulong8 x); +long16 __ovld __cnfn clz(long16 x); +ulong16 __ovld __cnfn clz(ulong16 x); + +/** + * Returns the count of trailing 0-bits in x. If x is 0, + * returns the size in bits of the type of x or + * component type of x, if x is a vector. + */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +char __ovld ctz(char x); +uchar __ovld ctz(uchar x); +char2 __ovld ctz(char2 x); +uchar2 __ovld ctz(uchar2 x); +char3 __ovld ctz(char3 x); +uchar3 __ovld ctz(uchar3 x); +char4 __ovld ctz(char4 x); +uchar4 __ovld ctz(uchar4 x); +char8 __ovld ctz(char8 x); +uchar8 __ovld ctz(uchar8 x); +char16 __ovld ctz(char16 x); +uchar16 __ovld ctz(uchar16 x); +short __ovld ctz(short x); +ushort __ovld ctz(ushort x); +short2 __ovld ctz(short2 x); +ushort2 __ovld ctz(ushort2 x); +short3 __ovld ctz(short3 x); +ushort3 __ovld ctz(ushort3 x); +short4 __ovld ctz(short4 x); +ushort4 __ovld ctz(ushort4 x); +short8 __ovld ctz(short8 x); +ushort8 __ovld ctz(ushort8 x); +short16 __ovld ctz(short16 x); +ushort16 __ovld ctz(ushort16 x); +int __ovld ctz(int x); +uint __ovld ctz(uint x); +int2 __ovld ctz(int2 x); +uint2 __ovld ctz(uint2 x); +int3 __ovld ctz(int3 x); +uint3 __ovld ctz(uint3 x); +int4 __ovld ctz(int4 x); +uint4 __ovld ctz(uint4 x); +int8 __ovld ctz(int8 x); +uint8 __ovld ctz(uint8 x); +int16 __ovld ctz(int16 x); +uint16 __ovld ctz(uint16 x); +long __ovld ctz(long x); +ulong __ovld ctz(ulong x); +long2 __ovld ctz(long2 x); +ulong2 __ovld ctz(ulong2 x); +long3 __ovld ctz(long3 x); +ulong3 __ovld ctz(ulong3 x); +long4 __ovld ctz(long4 x); +ulong4 __ovld ctz(ulong4 x); +long8 __ovld ctz(long8 x); +ulong8 __ovld ctz(ulong8 x); +long16 __ovld ctz(long16 x); +ulong16 __ovld ctz(ulong16 x); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Returns mul_hi(a, b) + c. 
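+ * For example, with uint arguments mad_hi(0x80000000u, 4u, 1u) is 3: the high 32 bits of the 64-bit product are 2, plus c = 1.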
+ */ +char __ovld __cnfn mad_hi(char a, char b, char c); +uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c); +char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c); +uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c); +char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c); +uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c); +char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c); +uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c); +char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c); +uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c); +char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c); +uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c); +short __ovld __cnfn mad_hi(short a, short b, short c); +ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c); +short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c); +ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c); +short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c); +ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c); +short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c); +ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c); +short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c); +ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c); +short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c); +ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c); +int __ovld __cnfn mad_hi(int a, int b, int c); +uint __ovld __cnfn mad_hi(uint a, uint b, uint c); +int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c); +uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c); +int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c); +uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c); +int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c); +uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c); +int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c); +uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c); +int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c); +uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c); +long __ovld __cnfn mad_hi(long a, long b, long c); +ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c); +long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c); +ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c); +long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c); +ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c); +long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c); +ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c); +long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c); +ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c); +long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c); +ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c); + +/** + * Returns a * b + c and saturates the result. 
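+ *
+ * A worked example for char (values chosen for illustration only):
+ *   mad_sat((char)100, (char)2, (char)10) == 127
+ *   // 100 * 2 + 10 = 210 exceeds CHAR_MAX (127), so the result saturates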
+ */ +char __ovld __cnfn mad_sat(char a, char b, char c); +uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c); +char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c); +uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c); +char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c); +uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c); +char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c); +uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c); +char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c); +uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c); +char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c); +uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c); +short __ovld __cnfn mad_sat(short a, short b, short c); +ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c); +short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c); +ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c); +short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c); +ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c); +short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c); +ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c); +short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c); +ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c); +short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c); +ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c); +int __ovld __cnfn mad_sat(int a, int b, int c); +uint __ovld __cnfn mad_sat(uint a, uint b, uint c); +int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c); +uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c); +int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c); +uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c); +int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c); +uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c); +int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c); +uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c); +int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c); +uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c); +long __ovld __cnfn mad_sat(long a, long b, long c); +ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c); +long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c); +ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c); +long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c); +ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c); +long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c); +ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c); +long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c); +ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c); +long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c); +ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c); + +/** + * Returns y if x < y, otherwise it returns x. 
+ */ +char __ovld __cnfn max(char x, char y); +uchar __ovld __cnfn max(uchar x, uchar y); +char2 __ovld __cnfn max(char2 x, char2 y); +uchar2 __ovld __cnfn max(uchar2 x, uchar2 y); +char3 __ovld __cnfn max(char3 x, char3 y); +uchar3 __ovld __cnfn max(uchar3 x, uchar3 y); +char4 __ovld __cnfn max(char4 x, char4 y); +uchar4 __ovld __cnfn max(uchar4 x, uchar4 y); +char8 __ovld __cnfn max(char8 x, char8 y); +uchar8 __ovld __cnfn max(uchar8 x, uchar8 y); +char16 __ovld __cnfn max(char16 x, char16 y); +uchar16 __ovld __cnfn max(uchar16 x, uchar16 y); +short __ovld __cnfn max(short x, short y); +ushort __ovld __cnfn max(ushort x, ushort y); +short2 __ovld __cnfn max(short2 x, short2 y); +ushort2 __ovld __cnfn max(ushort2 x, ushort2 y); +short3 __ovld __cnfn max(short3 x, short3 y); +ushort3 __ovld __cnfn max(ushort3 x, ushort3 y); +short4 __ovld __cnfn max(short4 x, short4 y); +ushort4 __ovld __cnfn max(ushort4 x, ushort4 y); +short8 __ovld __cnfn max(short8 x, short8 y); +ushort8 __ovld __cnfn max(ushort8 x, ushort8 y); +short16 __ovld __cnfn max(short16 x, short16 y); +ushort16 __ovld __cnfn max(ushort16 x, ushort16 y); +int __ovld __cnfn max(int x, int y); +uint __ovld __cnfn max(uint x, uint y); +int2 __ovld __cnfn max(int2 x, int2 y); +uint2 __ovld __cnfn max(uint2 x, uint2 y); +int3 __ovld __cnfn max(int3 x, int3 y); +uint3 __ovld __cnfn max(uint3 x, uint3 y); +int4 __ovld __cnfn max(int4 x, int4 y); +uint4 __ovld __cnfn max(uint4 x, uint4 y); +int8 __ovld __cnfn max(int8 x, int8 y); +uint8 __ovld __cnfn max(uint8 x, uint8 y); +int16 __ovld __cnfn max(int16 x, int16 y); +uint16 __ovld __cnfn max(uint16 x, uint16 y); +long __ovld __cnfn max(long x, long y); +ulong __ovld __cnfn max(ulong x, ulong y); +long2 __ovld __cnfn max(long2 x, long2 y); +ulong2 __ovld __cnfn max(ulong2 x, ulong2 y); +long3 __ovld __cnfn max(long3 x, long3 y); +ulong3 __ovld __cnfn max(ulong3 x, ulong3 y); +long4 __ovld __cnfn max(long4 x, long4 y); +ulong4 __ovld __cnfn max(ulong4 x, ulong4 y); +long8 __ovld __cnfn max(long8 x, long8 y); +ulong8 __ovld __cnfn max(ulong8 x, ulong8 y); +long16 __ovld __cnfn max(long16 x, long16 y); +ulong16 __ovld __cnfn max(ulong16 x, ulong16 y); +char __ovld __cnfn max(char x, char y); +uchar __ovld __cnfn max(uchar x, uchar y); +char2 __ovld __cnfn max(char2 x, char y); +uchar2 __ovld __cnfn max(uchar2 x, uchar y); +char3 __ovld __cnfn max(char3 x, char y); +uchar3 __ovld __cnfn max(uchar3 x, uchar y); +char4 __ovld __cnfn max(char4 x, char y); +uchar4 __ovld __cnfn max(uchar4 x, uchar y); +char8 __ovld __cnfn max(char8 x, char y); +uchar8 __ovld __cnfn max(uchar8 x, uchar y); +char16 __ovld __cnfn max(char16 x, char y); +uchar16 __ovld __cnfn max(uchar16 x, uchar y); +short __ovld __cnfn max(short x, short y); +ushort __ovld __cnfn max(ushort x, ushort y); +short2 __ovld __cnfn max(short2 x, short y); +ushort2 __ovld __cnfn max(ushort2 x, ushort y); +short3 __ovld __cnfn max(short3 x, short y); +ushort3 __ovld __cnfn max(ushort3 x, ushort y); +short4 __ovld __cnfn max(short4 x, short y); +ushort4 __ovld __cnfn max(ushort4 x, ushort y); +short8 __ovld __cnfn max(short8 x, short y); +ushort8 __ovld __cnfn max(ushort8 x, ushort y); +short16 __ovld __cnfn max(short16 x, short y); +ushort16 __ovld __cnfn max(ushort16 x, ushort y); +int __ovld __cnfn max(int x, int y); +uint __ovld __cnfn max(uint x, uint y); +int2 __ovld __cnfn max(int2 x, int y); +uint2 __ovld __cnfn max(uint2 x, uint y); +int3 __ovld __cnfn max(int3 x, int y); +uint3 __ovld __cnfn max(uint3 x, uint y); +int4 __ovld 
__cnfn max(int4 x, int y); +uint4 __ovld __cnfn max(uint4 x, uint y); +int8 __ovld __cnfn max(int8 x, int y); +uint8 __ovld __cnfn max(uint8 x, uint y); +int16 __ovld __cnfn max(int16 x, int y); +uint16 __ovld __cnfn max(uint16 x, uint y); +long __ovld __cnfn max(long x, long y); +ulong __ovld __cnfn max(ulong x, ulong y); +long2 __ovld __cnfn max(long2 x, long y); +ulong2 __ovld __cnfn max(ulong2 x, ulong y); +long3 __ovld __cnfn max(long3 x, long y); +ulong3 __ovld __cnfn max(ulong3 x, ulong y); +long4 __ovld __cnfn max(long4 x, long y); +ulong4 __ovld __cnfn max(ulong4 x, ulong y); +long8 __ovld __cnfn max(long8 x, long y); +ulong8 __ovld __cnfn max(ulong8 x, ulong y); +long16 __ovld __cnfn max(long16 x, long y); +ulong16 __ovld __cnfn max(ulong16 x, ulong y); + +/** + * Returns y if y < x, otherwise it returns x. + */ +char __ovld __cnfn min(char x, char y); +uchar __ovld __cnfn min(uchar x, uchar y); +char2 __ovld __cnfn min(char2 x, char2 y); +uchar2 __ovld __cnfn min(uchar2 x, uchar2 y); +char3 __ovld __cnfn min(char3 x, char3 y); +uchar3 __ovld __cnfn min(uchar3 x, uchar3 y); +char4 __ovld __cnfn min(char4 x, char4 y); +uchar4 __ovld __cnfn min(uchar4 x, uchar4 y); +char8 __ovld __cnfn min(char8 x, char8 y); +uchar8 __ovld __cnfn min(uchar8 x, uchar8 y); +char16 __ovld __cnfn min(char16 x, char16 y); +uchar16 __ovld __cnfn min(uchar16 x, uchar16 y); +short __ovld __cnfn min(short x, short y); +ushort __ovld __cnfn min(ushort x, ushort y); +short2 __ovld __cnfn min(short2 x, short2 y); +ushort2 __ovld __cnfn min(ushort2 x, ushort2 y); +short3 __ovld __cnfn min(short3 x, short3 y); +ushort3 __ovld __cnfn min(ushort3 x, ushort3 y); +short4 __ovld __cnfn min(short4 x, short4 y); +ushort4 __ovld __cnfn min(ushort4 x, ushort4 y); +short8 __ovld __cnfn min(short8 x, short8 y); +ushort8 __ovld __cnfn min(ushort8 x, ushort8 y); +short16 __ovld __cnfn min(short16 x, short16 y); +ushort16 __ovld __cnfn min(ushort16 x, ushort16 y); +int __ovld __cnfn min(int x, int y); +uint __ovld __cnfn min(uint x, uint y); +int2 __ovld __cnfn min(int2 x, int2 y); +uint2 __ovld __cnfn min(uint2 x, uint2 y); +int3 __ovld __cnfn min(int3 x, int3 y); +uint3 __ovld __cnfn min(uint3 x, uint3 y); +int4 __ovld __cnfn min(int4 x, int4 y); +uint4 __ovld __cnfn min(uint4 x, uint4 y); +int8 __ovld __cnfn min(int8 x, int8 y); +uint8 __ovld __cnfn min(uint8 x, uint8 y); +int16 __ovld __cnfn min(int16 x, int16 y); +uint16 __ovld __cnfn min(uint16 x, uint16 y); +long __ovld __cnfn min(long x, long y); +ulong __ovld __cnfn min(ulong x, ulong y); +long2 __ovld __cnfn min(long2 x, long2 y); +ulong2 __ovld __cnfn min(ulong2 x, ulong2 y); +long3 __ovld __cnfn min(long3 x, long3 y); +ulong3 __ovld __cnfn min(ulong3 x, ulong3 y); +long4 __ovld __cnfn min(long4 x, long4 y); +ulong4 __ovld __cnfn min(ulong4 x, ulong4 y); +long8 __ovld __cnfn min(long8 x, long8 y); +ulong8 __ovld __cnfn min(ulong8 x, ulong8 y); +long16 __ovld __cnfn min(long16 x, long16 y); +ulong16 __ovld __cnfn min(ulong16 x, ulong16 y); +char __ovld __cnfn min(char x, char y); +uchar __ovld __cnfn min(uchar x, uchar y); +char2 __ovld __cnfn min(char2 x, char y); +uchar2 __ovld __cnfn min(uchar2 x, uchar y); +char3 __ovld __cnfn min(char3 x, char y); +uchar3 __ovld __cnfn min(uchar3 x, uchar y); +char4 __ovld __cnfn min(char4 x, char y); +uchar4 __ovld __cnfn min(uchar4 x, uchar y); +char8 __ovld __cnfn min(char8 x, char y); +uchar8 __ovld __cnfn min(uchar8 x, uchar y); +char16 __ovld __cnfn min(char16 x, char y); +uchar16 __ovld __cnfn min(uchar16 x, uchar y); 
+short __ovld __cnfn min(short x, short y); +ushort __ovld __cnfn min(ushort x, ushort y); +short2 __ovld __cnfn min(short2 x, short y); +ushort2 __ovld __cnfn min(ushort2 x, ushort y); +short3 __ovld __cnfn min(short3 x, short y); +ushort3 __ovld __cnfn min(ushort3 x, ushort y); +short4 __ovld __cnfn min(short4 x, short y); +ushort4 __ovld __cnfn min(ushort4 x, ushort y); +short8 __ovld __cnfn min(short8 x, short y); +ushort8 __ovld __cnfn min(ushort8 x, ushort y); +short16 __ovld __cnfn min(short16 x, short y); +ushort16 __ovld __cnfn min(ushort16 x, ushort y); +int __ovld __cnfn min(int x, int y); +uint __ovld __cnfn min(uint x, uint y); +int2 __ovld __cnfn min(int2 x, int y); +uint2 __ovld __cnfn min(uint2 x, uint y); +int3 __ovld __cnfn min(int3 x, int y); +uint3 __ovld __cnfn min(uint3 x, uint y); +int4 __ovld __cnfn min(int4 x, int y); +uint4 __ovld __cnfn min(uint4 x, uint y); +int8 __ovld __cnfn min(int8 x, int y); +uint8 __ovld __cnfn min(uint8 x, uint y); +int16 __ovld __cnfn min(int16 x, int y); +uint16 __ovld __cnfn min(uint16 x, uint y); +long __ovld __cnfn min(long x, long y); +ulong __ovld __cnfn min(ulong x, ulong y); +long2 __ovld __cnfn min(long2 x, long y); +ulong2 __ovld __cnfn min(ulong2 x, ulong y); +long3 __ovld __cnfn min(long3 x, long y); +ulong3 __ovld __cnfn min(ulong3 x, ulong y); +long4 __ovld __cnfn min(long4 x, long y); +ulong4 __ovld __cnfn min(ulong4 x, ulong y); +long8 __ovld __cnfn min(long8 x, long y); +ulong8 __ovld __cnfn min(ulong8 x, ulong y); +long16 __ovld __cnfn min(long16 x, long y); +ulong16 __ovld __cnfn min(ulong16 x, ulong y); + +/** + * Computes x * y and returns the high half of the + * product of x and y. + */ +char __ovld __cnfn mul_hi(char x, char y); +uchar __ovld __cnfn mul_hi(uchar x, uchar y); +char2 __ovld __cnfn mul_hi(char2 x, char2 y); +uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y); +char3 __ovld __cnfn mul_hi(char3 x, char3 y); +uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y); +char4 __ovld __cnfn mul_hi(char4 x, char4 y); +uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y); +char8 __ovld __cnfn mul_hi(char8 x, char8 y); +uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y); +char16 __ovld __cnfn mul_hi(char16 x, char16 y); +uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y); +short __ovld __cnfn mul_hi(short x, short y); +ushort __ovld __cnfn mul_hi(ushort x, ushort y); +short2 __ovld __cnfn mul_hi(short2 x, short2 y); +ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y); +short3 __ovld __cnfn mul_hi(short3 x, short3 y); +ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y); +short4 __ovld __cnfn mul_hi(short4 x, short4 y); +ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y); +short8 __ovld __cnfn mul_hi(short8 x, short8 y); +ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y); +short16 __ovld __cnfn mul_hi(short16 x, short16 y); +ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y); +int __ovld __cnfn mul_hi(int x, int y); +uint __ovld __cnfn mul_hi(uint x, uint y); +int2 __ovld __cnfn mul_hi(int2 x, int2 y); +uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y); +int3 __ovld __cnfn mul_hi(int3 x, int3 y); +uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y); +int4 __ovld __cnfn mul_hi(int4 x, int4 y); +uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y); +int8 __ovld __cnfn mul_hi(int8 x, int8 y); +uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y); +int16 __ovld __cnfn mul_hi(int16 x, int16 y); +uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y); +long __ovld __cnfn mul_hi(long x, long y); +ulong __ovld __cnfn mul_hi(ulong x, ulong y); +long2 __ovld __cnfn 
mul_hi(long2 x, long2 y); +ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y); +long3 __ovld __cnfn mul_hi(long3 x, long3 y); +ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y); +long4 __ovld __cnfn mul_hi(long4 x, long4 y); +ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y); +long8 __ovld __cnfn mul_hi(long8 x, long8 y); +ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y); +long16 __ovld __cnfn mul_hi(long16 x, long16 y); +ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y); + +/** + * For each element in v, the bits are shifted left by + * the number of bits given by the corresponding + * element in i (subject to usual shift modulo rules + * described in section 6.3). Bits shifted off the left + * side of the element are shifted back in from the + * right. + */ +char __ovld __cnfn rotate(char v, char i); +uchar __ovld __cnfn rotate(uchar v, uchar i); +char2 __ovld __cnfn rotate(char2 v, char2 i); +uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i); +char3 __ovld __cnfn rotate(char3 v, char3 i); +uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i); +char4 __ovld __cnfn rotate(char4 v, char4 i); +uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i); +char8 __ovld __cnfn rotate(char8 v, char8 i); +uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i); +char16 __ovld __cnfn rotate(char16 v, char16 i); +uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i); +short __ovld __cnfn rotate(short v, short i); +ushort __ovld __cnfn rotate(ushort v, ushort i); +short2 __ovld __cnfn rotate(short2 v, short2 i); +ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i); +short3 __ovld __cnfn rotate(short3 v, short3 i); +ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i); +short4 __ovld __cnfn rotate(short4 v, short4 i); +ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i); +short8 __ovld __cnfn rotate(short8 v, short8 i); +ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i); +short16 __ovld __cnfn rotate(short16 v, short16 i); +ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i); +int __ovld __cnfn rotate(int v, int i); +uint __ovld __cnfn rotate(uint v, uint i); +int2 __ovld __cnfn rotate(int2 v, int2 i); +uint2 __ovld __cnfn rotate(uint2 v, uint2 i); +int3 __ovld __cnfn rotate(int3 v, int3 i); +uint3 __ovld __cnfn rotate(uint3 v, uint3 i); +int4 __ovld __cnfn rotate(int4 v, int4 i); +uint4 __ovld __cnfn rotate(uint4 v, uint4 i); +int8 __ovld __cnfn rotate(int8 v, int8 i); +uint8 __ovld __cnfn rotate(uint8 v, uint8 i); +int16 __ovld __cnfn rotate(int16 v, int16 i); +uint16 __ovld __cnfn rotate(uint16 v, uint16 i); +long __ovld __cnfn rotate(long v, long i); +ulong __ovld __cnfn rotate(ulong v, ulong i); +long2 __ovld __cnfn rotate(long2 v, long2 i); +ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i); +long3 __ovld __cnfn rotate(long3 v, long3 i); +ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i); +long4 __ovld __cnfn rotate(long4 v, long4 i); +ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i); +long8 __ovld __cnfn rotate(long8 v, long8 i); +ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i); +long16 __ovld __cnfn rotate(long16 v, long16 i); +ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i); + +/** + * Returns x - y and saturates the result. 
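+ *
+ * Worked examples (values chosen for illustration only):
+ *   sub_sat((uchar)10, (uchar)20)  == 0    // saturates at the uchar minimum, 0
+ *   sub_sat((char)-100, (char)100) == -128 // saturates at CHAR_MIN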
+ */ +char __ovld __cnfn sub_sat(char x, char y); +uchar __ovld __cnfn sub_sat(uchar x, uchar y); +char2 __ovld __cnfn sub_sat(char2 x, char2 y); +uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y); +char3 __ovld __cnfn sub_sat(char3 x, char3 y); +uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y); +char4 __ovld __cnfn sub_sat(char4 x, char4 y); +uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y); +char8 __ovld __cnfn sub_sat(char8 x, char8 y); +uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y); +char16 __ovld __cnfn sub_sat(char16 x, char16 y); +uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y); +short __ovld __cnfn sub_sat(short x, short y); +ushort __ovld __cnfn sub_sat(ushort x, ushort y); +short2 __ovld __cnfn sub_sat(short2 x, short2 y); +ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y); +short3 __ovld __cnfn sub_sat(short3 x, short3 y); +ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y); +short4 __ovld __cnfn sub_sat(short4 x, short4 y); +ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y); +short8 __ovld __cnfn sub_sat(short8 x, short8 y); +ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y); +short16 __ovld __cnfn sub_sat(short16 x, short16 y); +ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y); +int __ovld __cnfn sub_sat(int x, int y); +uint __ovld __cnfn sub_sat(uint x, uint y); +int2 __ovld __cnfn sub_sat(int2 x, int2 y); +uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y); +int3 __ovld __cnfn sub_sat(int3 x, int3 y); +uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y); +int4 __ovld __cnfn sub_sat(int4 x, int4 y); +uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y); +int8 __ovld __cnfn sub_sat(int8 x, int8 y); +uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y); +int16 __ovld __cnfn sub_sat(int16 x, int16 y); +uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y); +long __ovld __cnfn sub_sat(long x, long y); +ulong __ovld __cnfn sub_sat(ulong x, ulong y); +long2 __ovld __cnfn sub_sat(long2 x, long2 y); +ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y); +long3 __ovld __cnfn sub_sat(long3 x, long3 y); +ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y); +long4 __ovld __cnfn sub_sat(long4 x, long4 y); +ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y); +long8 __ovld __cnfn sub_sat(long8 x, long8 y); +ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y); +long16 __ovld __cnfn sub_sat(long16 x, long16 y); +ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y); + +/** + * result[i] = ((short)hi[i] << 8) | lo[i] + * result[i] = ((ushort)hi[i] << 8) | lo[i] + */ +short __ovld __cnfn upsample(char hi, uchar lo); +ushort __ovld __cnfn upsample(uchar hi, uchar lo); +short2 __ovld __cnfn upsample(char2 hi, uchar2 lo); +short3 __ovld __cnfn upsample(char3 hi, uchar3 lo); +short4 __ovld __cnfn upsample(char4 hi, uchar4 lo); +short8 __ovld __cnfn upsample(char8 hi, uchar8 lo); +short16 __ovld __cnfn upsample(char16 hi, uchar16 lo); +ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo); +ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo); +ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo); +ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo); +ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo); + +/** + * result[i] = ((int)hi[i] << 16) | lo[i] + * result[i] = ((uint)hi[i] << 16) | lo[i] + */ +int __ovld __cnfn upsample(short hi, ushort lo); +uint __ovld __cnfn upsample(ushort hi, ushort lo); +int2 __ovld __cnfn upsample(short2 hi, ushort2 lo); +int3 __ovld __cnfn upsample(short3 hi, ushort3 lo); +int4 __ovld __cnfn upsample(short4 hi, ushort4 lo); +int8 __ovld __cnfn upsample(short8 hi, ushort8 lo); +int16 
__ovld __cnfn upsample(short16 hi, ushort16 lo);
+uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
+uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
+uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
+uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
+uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
+/**
+ * result[i] = ((long)hi[i] << 32) | lo[i]
+ * result[i] = ((ulong)hi[i] << 32) | lo[i]
+ */
+long __ovld __cnfn upsample(int hi, uint lo);
+ulong __ovld __cnfn upsample(uint hi, uint lo);
+long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
+long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
+long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
+long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
+long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
+ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
+ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
+ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
+ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
+ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
+
+/*
+ * popcount(x): returns the number of set bits in x
+ */
+char __ovld __cnfn popcount(char x);
+uchar __ovld __cnfn popcount(uchar x);
+char2 __ovld __cnfn popcount(char2 x);
+uchar2 __ovld __cnfn popcount(uchar2 x);
+char3 __ovld __cnfn popcount(char3 x);
+uchar3 __ovld __cnfn popcount(uchar3 x);
+char4 __ovld __cnfn popcount(char4 x);
+uchar4 __ovld __cnfn popcount(uchar4 x);
+char8 __ovld __cnfn popcount(char8 x);
+uchar8 __ovld __cnfn popcount(uchar8 x);
+char16 __ovld __cnfn popcount(char16 x);
+uchar16 __ovld __cnfn popcount(uchar16 x);
+short __ovld __cnfn popcount(short x);
+ushort __ovld __cnfn popcount(ushort x);
+short2 __ovld __cnfn popcount(short2 x);
+ushort2 __ovld __cnfn popcount(ushort2 x);
+short3 __ovld __cnfn popcount(short3 x);
+ushort3 __ovld __cnfn popcount(ushort3 x);
+short4 __ovld __cnfn popcount(short4 x);
+ushort4 __ovld __cnfn popcount(ushort4 x);
+short8 __ovld __cnfn popcount(short8 x);
+ushort8 __ovld __cnfn popcount(ushort8 x);
+short16 __ovld __cnfn popcount(short16 x);
+ushort16 __ovld __cnfn popcount(ushort16 x);
+int __ovld __cnfn popcount(int x);
+uint __ovld __cnfn popcount(uint x);
+int2 __ovld __cnfn popcount(int2 x);
+uint2 __ovld __cnfn popcount(uint2 x);
+int3 __ovld __cnfn popcount(int3 x);
+uint3 __ovld __cnfn popcount(uint3 x);
+int4 __ovld __cnfn popcount(int4 x);
+uint4 __ovld __cnfn popcount(uint4 x);
+int8 __ovld __cnfn popcount(int8 x);
+uint8 __ovld __cnfn popcount(uint8 x);
+int16 __ovld __cnfn popcount(int16 x);
+uint16 __ovld __cnfn popcount(uint16 x);
+long __ovld __cnfn popcount(long x);
+ulong __ovld __cnfn popcount(ulong x);
+long2 __ovld __cnfn popcount(long2 x);
+ulong2 __ovld __cnfn popcount(ulong2 x);
+long3 __ovld __cnfn popcount(long3 x);
+ulong3 __ovld __cnfn popcount(ulong3 x);
+long4 __ovld __cnfn popcount(long4 x);
+ulong4 __ovld __cnfn popcount(ulong4 x);
+long8 __ovld __cnfn popcount(long8 x);
+ulong8 __ovld __cnfn popcount(ulong8 x);
+long16 __ovld __cnfn popcount(long16 x);
+ulong16 __ovld __cnfn popcount(ulong16 x);
+
+/**
+ * Multiply two 24-bit integer values x and y and add
+ * the 32-bit integer result to the 32-bit integer z.
+ * Refer to definition of mul24 to see how the 24-bit
+ * integer multiplication is performed.
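+ *
+ * An illustrative usage sketch (hypothetical kernel; assumes the group id,
+ * local size and local id all fit in 24 bits, which holds for typical
+ * launch configurations):
+ *   __kernel void mad24_example(__global int *out) {
+ *     int gid = mad24((int)get_group_id(0), (int)get_local_size(0),
+ *                     (int)get_local_id(0));
+ *     out[gid] = gid;
+ *   }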
+ */ +int __ovld __cnfn mad24(int x, int y, int z); +uint __ovld __cnfn mad24(uint x, uint y, uint z); +int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z); +uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z); +int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z); +uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z); +int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z); +uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z); +int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z); +uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z); +int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z); +uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z); + +/** + * Multiply two 24-bit integer values x and y. x and y + * are 32-bit integers but only the low 24-bits are used + * to perform the multiplication. mul24 should only + * be used when values in x and y are in the range [- + * 2^23, 2^23-1] if x and y are signed integers and in the + * range [0, 2^24-1] if x and y are unsigned integers. If + * x and y are not in this range, the multiplication + * result is implementation-defined. + */ +int __ovld __cnfn mul24(int x, int y); +uint __ovld __cnfn mul24(uint x, uint y); +int2 __ovld __cnfn mul24(int2 x, int2 y); +uint2 __ovld __cnfn mul24(uint2 x, uint2 y); +int3 __ovld __cnfn mul24(int3 x, int3 y); +uint3 __ovld __cnfn mul24(uint3 x, uint3 y); +int4 __ovld __cnfn mul24(int4 x, int4 y); +uint4 __ovld __cnfn mul24(uint4 x, uint4 y); +int8 __ovld __cnfn mul24(int8 x, int8 y); +uint8 __ovld __cnfn mul24(uint8 x, uint8 y); +int16 __ovld __cnfn mul24(int16 x, int16 y); +uint16 __ovld __cnfn mul24(uint16 x, uint16 y); + +// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions + +/** + * Returns fmin(fmax(x, minval), maxval). + * Results are undefined if minval > maxval. 
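+ *
+ * A minimal usage sketch (hypothetical kernel, for illustration only):
+ *   __kernel void clamp_example(__global float4 *v) {
+ *     size_t i = get_global_id(0);
+ *     v[i] = clamp(v[i], 0.0f, 1.0f); // per-component clamp into [0.0, 1.0]
+ *   }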
+ */ +float __ovld __cnfn clamp(float x, float minval, float maxval); +float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval); +float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval); +float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval); +float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval); +float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval); +float2 __ovld __cnfn clamp(float2 x, float minval, float maxval); +float3 __ovld __cnfn clamp(float3 x, float minval, float maxval); +float4 __ovld __cnfn clamp(float4 x, float minval, float maxval); +float8 __ovld __cnfn clamp(float8 x, float minval, float maxval); +float16 __ovld __cnfn clamp(float16 x, float minval, float maxval); +#ifdef cl_khr_fp64 +double __ovld __cnfn clamp(double x, double minval, double maxval); +double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval); +double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval); +double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval); +double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval); +double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval); +double2 __ovld __cnfn clamp(double2 x, double minval, double maxval); +double3 __ovld __cnfn clamp(double3 x, double minval, double maxval); +double4 __ovld __cnfn clamp(double4 x, double minval, double maxval); +double8 __ovld __cnfn clamp(double8 x, double minval, double maxval); +double16 __ovld __cnfn clamp(double16 x, double minval, double maxval); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn clamp(half x, half minval, half maxval); +half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval); +half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval); +half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval); +half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval); +half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval); +half2 __ovld __cnfn clamp(half2 x, half minval, half maxval); +half3 __ovld __cnfn clamp(half3 x, half minval, half maxval); +half4 __ovld __cnfn clamp(half4 x, half minval, half maxval); +half8 __ovld __cnfn clamp(half8 x, half minval, half maxval); +half16 __ovld __cnfn clamp(half16 x, half minval, half maxval); +#endif //cl_khr_fp16 + +/** + * Converts radians to degrees, i.e. (180 / PI) * + * radians. + */ +float __ovld __cnfn degrees(float radians); +float2 __ovld __cnfn degrees(float2 radians); +float3 __ovld __cnfn degrees(float3 radians); +float4 __ovld __cnfn degrees(float4 radians); +float8 __ovld __cnfn degrees(float8 radians); +float16 __ovld __cnfn degrees(float16 radians); +#ifdef cl_khr_fp64 +double __ovld __cnfn degrees(double radians); +double2 __ovld __cnfn degrees(double2 radians); +double3 __ovld __cnfn degrees(double3 radians); +double4 __ovld __cnfn degrees(double4 radians); +double8 __ovld __cnfn degrees(double8 radians); +double16 __ovld __cnfn degrees(double16 radians); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn degrees(half radians); +half2 __ovld __cnfn degrees(half2 radians); +half3 __ovld __cnfn degrees(half3 radians); +half4 __ovld __cnfn degrees(half4 radians); +half8 __ovld __cnfn degrees(half8 radians); +half16 __ovld __cnfn degrees(half16 radians); +#endif //cl_khr_fp16 + +/** + * Returns y if x < y, otherwise it returns x. If x and y + * are infinite or NaN, the return values are undefined. 
+ */ +float __ovld __cnfn max(float x, float y); +float2 __ovld __cnfn max(float2 x, float2 y); +float3 __ovld __cnfn max(float3 x, float3 y); +float4 __ovld __cnfn max(float4 x, float4 y); +float8 __ovld __cnfn max(float8 x, float8 y); +float16 __ovld __cnfn max(float16 x, float16 y); +float2 __ovld __cnfn max(float2 x, float y); +float3 __ovld __cnfn max(float3 x, float y); +float4 __ovld __cnfn max(float4 x, float y); +float8 __ovld __cnfn max(float8 x, float y); +float16 __ovld __cnfn max(float16 x, float y); +#ifdef cl_khr_fp64 +double __ovld __cnfn max(double x, double y); +double2 __ovld __cnfn max(double2 x, double2 y); +double3 __ovld __cnfn max(double3 x, double3 y); +double4 __ovld __cnfn max(double4 x, double4 y); +double8 __ovld __cnfn max(double8 x, double8 y); +double16 __ovld __cnfn max(double16 x, double16 y); +double2 __ovld __cnfn max(double2 x, double y); +double3 __ovld __cnfn max(double3 x, double y); +double4 __ovld __cnfn max(double4 x, double y); +double8 __ovld __cnfn max(double8 x, double y); +double16 __ovld __cnfn max(double16 x, double y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn max(half x, half y); +half2 __ovld __cnfn max(half2 x, half2 y); +half3 __ovld __cnfn max(half3 x, half3 y); +half4 __ovld __cnfn max(half4 x, half4 y); +half8 __ovld __cnfn max(half8 x, half8 y); +half16 __ovld __cnfn max(half16 x, half16 y); +half2 __ovld __cnfn max(half2 x, half y); +half3 __ovld __cnfn max(half3 x, half y); +half4 __ovld __cnfn max(half4 x, half y); +half8 __ovld __cnfn max(half8 x, half y); +half16 __ovld __cnfn max(half16 x, half y); +#endif //cl_khr_fp16 + +/** + * Returns y if y < x, otherwise it returns x. If x and y + * are infinite or NaN, the return values are undefined. + */ +float __ovld __cnfn min(float x, float y); +float2 __ovld __cnfn min(float2 x, float2 y); +float3 __ovld __cnfn min(float3 x, float3 y); +float4 __ovld __cnfn min(float4 x, float4 y); +float8 __ovld __cnfn min(float8 x, float8 y); +float16 __ovld __cnfn min(float16 x, float16 y); +float2 __ovld __cnfn min(float2 x, float y); +float3 __ovld __cnfn min(float3 x, float y); +float4 __ovld __cnfn min(float4 x, float y); +float8 __ovld __cnfn min(float8 x, float y); +float16 __ovld __cnfn min(float16 x, float y); +#ifdef cl_khr_fp64 +double __ovld __cnfn min(double x, double y); +double2 __ovld __cnfn min(double2 x, double2 y); +double3 __ovld __cnfn min(double3 x, double3 y); +double4 __ovld __cnfn min(double4 x, double4 y); +double8 __ovld __cnfn min(double8 x, double8 y); +double16 __ovld __cnfn min(double16 x, double16 y); +double2 __ovld __cnfn min(double2 x, double y); +double3 __ovld __cnfn min(double3 x, double y); +double4 __ovld __cnfn min(double4 x, double y); +double8 __ovld __cnfn min(double8 x, double y); +double16 __ovld __cnfn min(double16 x, double y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn min(half x, half y); +half2 __ovld __cnfn min(half2 x, half2 y); +half3 __ovld __cnfn min(half3 x, half3 y); +half4 __ovld __cnfn min(half4 x, half4 y); +half8 __ovld __cnfn min(half8 x, half8 y); +half16 __ovld __cnfn min(half16 x, half16 y); +half2 __ovld __cnfn min(half2 x, half y); +half3 __ovld __cnfn min(half3 x, half y); +half4 __ovld __cnfn min(half4 x, half y); +half8 __ovld __cnfn min(half8 x, half y); +half16 __ovld __cnfn min(half16 x, half y); +#endif //cl_khr_fp16 + +/** + * Returns the linear blend of x & y implemented as: + * x + (y - x) * a + * a must be a value in the range 0.0 ... 1.0. If a is not + * in the range 0.0 ... 
1.0, the return values are + * undefined. + */ +float __ovld __cnfn mix(float x, float y, float a); +float2 __ovld __cnfn mix(float2 x, float2 y, float2 a); +float3 __ovld __cnfn mix(float3 x, float3 y, float3 a); +float4 __ovld __cnfn mix(float4 x, float4 y, float4 a); +float8 __ovld __cnfn mix(float8 x, float8 y, float8 a); +float16 __ovld __cnfn mix(float16 x, float16 y, float16 a); +float2 __ovld __cnfn mix(float2 x, float2 y, float a); +float3 __ovld __cnfn mix(float3 x, float3 y, float a); +float4 __ovld __cnfn mix(float4 x, float4 y, float a); +float8 __ovld __cnfn mix(float8 x, float8 y, float a); +float16 __ovld __cnfn mix(float16 x, float16 y, float a); +#ifdef cl_khr_fp64 +double __ovld __cnfn mix(double x, double y, double a); +double2 __ovld __cnfn mix(double2 x, double2 y, double2 a); +double3 __ovld __cnfn mix(double3 x, double3 y, double3 a); +double4 __ovld __cnfn mix(double4 x, double4 y, double4 a); +double8 __ovld __cnfn mix(double8 x, double8 y, double8 a); +double16 __ovld __cnfn mix(double16 x, double16 y, double16 a); +double2 __ovld __cnfn mix(double2 x, double2 y, double a); +double3 __ovld __cnfn mix(double3 x, double3 y, double a); +double4 __ovld __cnfn mix(double4 x, double4 y, double a); +double8 __ovld __cnfn mix(double8 x, double8 y, double a); +double16 __ovld __cnfn mix(double16 x, double16 y, double a); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn mix(half x, half y, half a); +half2 __ovld __cnfn mix(half2 x, half2 y, half2 a); +half3 __ovld __cnfn mix(half3 x, half3 y, half3 a); +half4 __ovld __cnfn mix(half4 x, half4 y, half4 a); +half8 __ovld __cnfn mix(half8 x, half8 y, half8 a); +half16 __ovld __cnfn mix(half16 x, half16 y, half16 a); +half2 __ovld __cnfn mix(half2 x, half2 y, half a); +half3 __ovld __cnfn mix(half3 x, half3 y, half a); +half4 __ovld __cnfn mix(half4 x, half4 y, half a); +half8 __ovld __cnfn mix(half8 x, half8 y, half a); +half16 __ovld __cnfn mix(half16 x, half16 y, half a); +#endif //cl_khr_fp16 + +/** + * Converts degrees to radians, i.e. (PI / 180) * + * degrees. + */ +float __ovld __cnfn radians(float degrees); +float2 __ovld __cnfn radians(float2 degrees); +float3 __ovld __cnfn radians(float3 degrees); +float4 __ovld __cnfn radians(float4 degrees); +float8 __ovld __cnfn radians(float8 degrees); +float16 __ovld __cnfn radians(float16 degrees); +#ifdef cl_khr_fp64 +double __ovld __cnfn radians(double degrees); +double2 __ovld __cnfn radians(double2 degrees); +double3 __ovld __cnfn radians(double3 degrees); +double4 __ovld __cnfn radians(double4 degrees); +double8 __ovld __cnfn radians(double8 degrees); +double16 __ovld __cnfn radians(double16 degrees); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn radians(half degrees); +half2 __ovld __cnfn radians(half2 degrees); +half3 __ovld __cnfn radians(half3 degrees); +half4 __ovld __cnfn radians(half4 degrees); +half8 __ovld __cnfn radians(half8 degrees); +half16 __ovld __cnfn radians(half16 degrees); +#endif //cl_khr_fp16 + +/** + * Returns 0.0 if x < edge, otherwise it returns 1.0. 
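+ *
+ * Illustrative examples (values chosen for demonstration only):
+ *   step(0.5f, 0.2f) == 0.0f
+ *   step(0.5f, (float2)(0.2f, 0.9f)) == (float2)(0.0f, 1.0f)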
+ */
+float __ovld __cnfn step(float edge, float x);
+float2 __ovld __cnfn step(float2 edge, float2 x);
+float3 __ovld __cnfn step(float3 edge, float3 x);
+float4 __ovld __cnfn step(float4 edge, float4 x);
+float8 __ovld __cnfn step(float8 edge, float8 x);
+float16 __ovld __cnfn step(float16 edge, float16 x);
+float2 __ovld __cnfn step(float edge, float2 x);
+float3 __ovld __cnfn step(float edge, float3 x);
+float4 __ovld __cnfn step(float edge, float4 x);
+float8 __ovld __cnfn step(float edge, float8 x);
+float16 __ovld __cnfn step(float edge, float16 x);
+#ifdef cl_khr_fp64
+double __ovld __cnfn step(double edge, double x);
+double2 __ovld __cnfn step(double2 edge, double2 x);
+double3 __ovld __cnfn step(double3 edge, double3 x);
+double4 __ovld __cnfn step(double4 edge, double4 x);
+double8 __ovld __cnfn step(double8 edge, double8 x);
+double16 __ovld __cnfn step(double16 edge, double16 x);
+double2 __ovld __cnfn step(double edge, double2 x);
+double3 __ovld __cnfn step(double edge, double3 x);
+double4 __ovld __cnfn step(double edge, double4 x);
+double8 __ovld __cnfn step(double edge, double8 x);
+double16 __ovld __cnfn step(double edge, double16 x);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn step(half edge, half x);
+half2 __ovld __cnfn step(half2 edge, half2 x);
+half3 __ovld __cnfn step(half3 edge, half3 x);
+half4 __ovld __cnfn step(half4 edge, half4 x);
+half8 __ovld __cnfn step(half8 edge, half8 x);
+half16 __ovld __cnfn step(half16 edge, half16 x);
+half __ovld __cnfn step(half edge, half x);
+half2 __ovld __cnfn step(half edge, half2 x);
+half3 __ovld __cnfn step(half edge, half3 x);
+half4 __ovld __cnfn step(half edge, half4 x);
+half8 __ovld __cnfn step(half edge, half8 x);
+half16 __ovld __cnfn step(half edge, half16 x);
+#endif //cl_khr_fp16
+
+/**
+ * Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and
+ * performs smooth Hermite interpolation between 0
+ * and 1 when edge0 < x < edge1. This is useful in
+ * cases where you would want a threshold function
+ * with a smooth transition.
+ * This is equivalent to:
+ * gentype t;
+ * t = clamp ((x - edge0) / (edge1 - edge0), 0, 1);
+ * return t * t * (3 - 2 * t);
+ * Results are undefined if edge0 >= edge1 or if x,
+ * edge0 or edge1 is a NaN.
+ */ +float __ovld __cnfn smoothstep(float edge0, float edge1, float x); +float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x); +float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x); +float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x); +float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x); +float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x); +float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x); +float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x); +float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x); +float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x); +float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn smoothstep(double edge0, double edge1, double x); +double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x); +double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x); +double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x); +double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x); +double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x); +double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x); +double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x); +double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x); +double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x); +double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn smoothstep(half edge0, half edge1, half x); +half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x); +half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x); +half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x); +half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x); +half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x); +half __ovld __cnfn smoothstep(half edge0, half edge1, half x); +half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x); +half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x); +half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x); +half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x); +half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x); +#endif //cl_khr_fp16 + +/** + * Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x = + * +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN. + */ +float __ovld __cnfn sign(float x); +float2 __ovld __cnfn sign(float2 x); +float3 __ovld __cnfn sign(float3 x); +float4 __ovld __cnfn sign(float4 x); +float8 __ovld __cnfn sign(float8 x); +float16 __ovld __cnfn sign(float16 x); +#ifdef cl_khr_fp64 +double __ovld __cnfn sign(double x); +double2 __ovld __cnfn sign(double2 x); +double3 __ovld __cnfn sign(double3 x); +double4 __ovld __cnfn sign(double4 x); +double8 __ovld __cnfn sign(double8 x); +double16 __ovld __cnfn sign(double16 x); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn sign(half x); +half2 __ovld __cnfn sign(half2 x); +half3 __ovld __cnfn sign(half3 x); +half4 __ovld __cnfn sign(half4 x); +half8 __ovld __cnfn sign(half8 x); +half16 __ovld __cnfn sign(half16 x); +#endif //cl_khr_fp16 + +// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions + +/** + * Returns the cross product of p0.xyz and p1.xyz. 
The
+ * w component of float4 result returned will be 0.0.
+ */
+float4 __ovld __cnfn cross(float4 p0, float4 p1);
+float3 __ovld __cnfn cross(float3 p0, float3 p1);
+#ifdef cl_khr_fp64
+double4 __ovld __cnfn cross(double4 p0, double4 p1);
+double3 __ovld __cnfn cross(double3 p0, double3 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half4 __ovld __cnfn cross(half4 p0, half4 p1);
+half3 __ovld __cnfn cross(half3 p0, half3 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Compute dot product.
+ */
+float __ovld __cnfn dot(float p0, float p1);
+float __ovld __cnfn dot(float2 p0, float2 p1);
+float __ovld __cnfn dot(float3 p0, float3 p1);
+float __ovld __cnfn dot(float4 p0, float4 p1);
+#ifdef cl_khr_fp64
+double __ovld __cnfn dot(double p0, double p1);
+double __ovld __cnfn dot(double2 p0, double2 p1);
+double __ovld __cnfn dot(double3 p0, double3 p1);
+double __ovld __cnfn dot(double4 p0, double4 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn dot(half p0, half p1);
+half __ovld __cnfn dot(half2 p0, half2 p1);
+half __ovld __cnfn dot(half3 p0, half3 p1);
+half __ovld __cnfn dot(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the distance between p0 and p1. This is
+ * calculated as length(p0 - p1).
+ */
+float __ovld __cnfn distance(float p0, float p1);
+float __ovld __cnfn distance(float2 p0, float2 p1);
+float __ovld __cnfn distance(float3 p0, float3 p1);
+float __ovld __cnfn distance(float4 p0, float4 p1);
+#ifdef cl_khr_fp64
+double __ovld __cnfn distance(double p0, double p1);
+double __ovld __cnfn distance(double2 p0, double2 p1);
+double __ovld __cnfn distance(double3 p0, double3 p1);
+double __ovld __cnfn distance(double4 p0, double4 p1);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn distance(half p0, half p1);
+half __ovld __cnfn distance(half2 p0, half2 p1);
+half __ovld __cnfn distance(half3 p0, half3 p1);
+half __ovld __cnfn distance(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Return the length of vector p, i.e.,
+ * sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn length(float p);
+float __ovld __cnfn length(float2 p);
+float __ovld __cnfn length(float3 p);
+float __ovld __cnfn length(float4 p);
+#ifdef cl_khr_fp64
+double __ovld __cnfn length(double p);
+double __ovld __cnfn length(double2 p);
+double __ovld __cnfn length(double3 p);
+double __ovld __cnfn length(double4 p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn length(half p);
+half __ovld __cnfn length(half2 p);
+half __ovld __cnfn length(half3 p);
+half __ovld __cnfn length(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1.
+ */
+float __ovld __cnfn normalize(float p);
+float2 __ovld __cnfn normalize(float2 p);
+float3 __ovld __cnfn normalize(float3 p);
+float4 __ovld __cnfn normalize(float4 p);
+#ifdef cl_khr_fp64
+double __ovld __cnfn normalize(double p);
+double2 __ovld __cnfn normalize(double2 p);
+double3 __ovld __cnfn normalize(double3 p);
+double4 __ovld __cnfn normalize(double4 p);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+half __ovld __cnfn normalize(half p);
+half2 __ovld __cnfn normalize(half2 p);
+half3 __ovld __cnfn normalize(half3 p);
+half4 __ovld __cnfn normalize(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns fast_length(p0 - p1).
+ */
+float __ovld __cnfn fast_distance(float p0, float p1);
+float __ovld __cnfn fast_distance(float2 p0, float2 p1);
+float __ovld __cnfn fast_distance(float3 p0, float3 p1);
+float __ovld __cnfn fast_distance(float4 p0, float4 p1);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_distance(half p0, half p1);
+half __ovld __cnfn fast_distance(half2 p0, half2 p1);
+half __ovld __cnfn fast_distance(half3 p0, half3 p1);
+half __ovld __cnfn fast_distance(half4 p0, half4 p1);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the length of vector p computed as:
+ * half_sqrt(p.x^2 + p.y^2 + ...)
+ */
+float __ovld __cnfn fast_length(float p);
+float __ovld __cnfn fast_length(float2 p);
+float __ovld __cnfn fast_length(float3 p);
+float __ovld __cnfn fast_length(float4 p);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_length(half p);
+half __ovld __cnfn fast_length(half2 p);
+half __ovld __cnfn fast_length(half3 p);
+half __ovld __cnfn fast_length(half4 p);
+#endif //cl_khr_fp16
+
+/**
+ * Returns a vector in the same direction as p but with a
+ * length of 1. fast_normalize is computed as:
+ * p * half_rsqrt (p.x^2 + p.y^2 + ... )
+ * The result shall be within 8192 ulps error from the
+ * infinitely precise result of
+ * if (all(p == 0.0f))
+ * result = p;
+ * else
+ * result = p / sqrt (p.x^2 + p.y^2 + ...);
+ * with the following exceptions:
+ * 1) If the sum of squares is greater than FLT_MAX
+ * then the value of the floating-point values in the
+ * result vector are undefined.
+ * 2) If the sum of squares is less than FLT_MIN then
+ * the implementation may return back p.
+ * 3) If the device is in "denorms are flushed to zero"
+ * mode, individual operand elements with magnitude
+ * less than sqrt(FLT_MIN) may be flushed to zero
+ * before proceeding with the calculation.
+ */
+float __ovld __cnfn fast_normalize(float p);
+float2 __ovld __cnfn fast_normalize(float2 p);
+float3 __ovld __cnfn fast_normalize(float3 p);
+float4 __ovld __cnfn fast_normalize(float4 p);
+#ifdef cl_khr_fp16
+half __ovld __cnfn fast_normalize(half p);
+half2 __ovld __cnfn fast_normalize(half2 p);
+half3 __ovld __cnfn fast_normalize(half3 p);
+half4 __ovld __cnfn fast_normalize(half4 p);
+#endif //cl_khr_fp16
+
+// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
+
+/**
+ * intn isequal (floatn x, floatn y)
+ * Returns the component-wise compare of x == y.
+ */
+int __ovld __cnfn isequal(float x, float y);
+int2 __ovld __cnfn isequal(float2 x, float2 y);
+int3 __ovld __cnfn isequal(float3 x, float3 y);
+int4 __ovld __cnfn isequal(float4 x, float4 y);
+int8 __ovld __cnfn isequal(float8 x, float8 y);
+int16 __ovld __cnfn isequal(float16 x, float16 y);
+#ifdef cl_khr_fp64
+int __ovld __cnfn isequal(double x, double y);
+long2 __ovld __cnfn isequal(double2 x, double2 y);
+long3 __ovld __cnfn isequal(double3 x, double3 y);
+long4 __ovld __cnfn isequal(double4 x, double4 y);
+long8 __ovld __cnfn isequal(double8 x, double8 y);
+long16 __ovld __cnfn isequal(double16 x, double16 y);
+#endif //cl_khr_fp64
+#ifdef cl_khr_fp16
+int __ovld __cnfn isequal(half x, half y);
+short2 __ovld __cnfn isequal(half2 x, half2 y);
+short3 __ovld __cnfn isequal(half3 x, half3 y);
+short4 __ovld __cnfn isequal(half4 x, half4 y);
+short8 __ovld __cnfn isequal(half8 x, half8 y);
+short16 __ovld __cnfn isequal(half16 x, half16 y);
+#endif //cl_khr_fp16
+
+/**
+ * Returns the component-wise compare of x != y.
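+ *
+ * Note (illustrative, based on the general relational-function return
+ * convention): the scalar forms return 1 for true and 0 for false, while
+ * the vector forms return -1 (all bits set) for true and 0 for false, e.g.
+ *   isnotequal(1.0f, 1.0f) == 0
+ *   isnotequal((float2)(1.0f, 2.0f), (float2)(1.0f, 0.0f)) == (int2)(0, -1)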
+ */ +int __ovld __cnfn isnotequal(float x, float y); +int2 __ovld __cnfn isnotequal(float2 x, float2 y); +int3 __ovld __cnfn isnotequal(float3 x, float3 y); +int4 __ovld __cnfn isnotequal(float4 x, float4 y); +int8 __ovld __cnfn isnotequal(float8 x, float8 y); +int16 __ovld __cnfn isnotequal(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isnotequal(double x, double y); +long2 __ovld __cnfn isnotequal(double2 x, double2 y); +long3 __ovld __cnfn isnotequal(double3 x, double3 y); +long4 __ovld __cnfn isnotequal(double4 x, double4 y); +long8 __ovld __cnfn isnotequal(double8 x, double8 y); +long16 __ovld __cnfn isnotequal(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isnotequal(half x, half y); +short2 __ovld __cnfn isnotequal(half2 x, half2 y); +short3 __ovld __cnfn isnotequal(half3 x, half3 y); +short4 __ovld __cnfn isnotequal(half4 x, half4 y); +short8 __ovld __cnfn isnotequal(half8 x, half8 y); +short16 __ovld __cnfn isnotequal(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of x > y. + */ +int __ovld __cnfn isgreater(float x, float y); +int2 __ovld __cnfn isgreater(float2 x, float2 y); +int3 __ovld __cnfn isgreater(float3 x, float3 y); +int4 __ovld __cnfn isgreater(float4 x, float4 y); +int8 __ovld __cnfn isgreater(float8 x, float8 y); +int16 __ovld __cnfn isgreater(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isgreater(double x, double y); +long2 __ovld __cnfn isgreater(double2 x, double2 y); +long3 __ovld __cnfn isgreater(double3 x, double3 y); +long4 __ovld __cnfn isgreater(double4 x, double4 y); +long8 __ovld __cnfn isgreater(double8 x, double8 y); +long16 __ovld __cnfn isgreater(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isgreater(half x, half y); +short2 __ovld __cnfn isgreater(half2 x, half2 y); +short3 __ovld __cnfn isgreater(half3 x, half3 y); +short4 __ovld __cnfn isgreater(half4 x, half4 y); +short8 __ovld __cnfn isgreater(half8 x, half8 y); +short16 __ovld __cnfn isgreater(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of x >= y. + */ +int __ovld __cnfn isgreaterequal(float x, float y); +int2 __ovld __cnfn isgreaterequal(float2 x, float2 y); +int3 __ovld __cnfn isgreaterequal(float3 x, float3 y); +int4 __ovld __cnfn isgreaterequal(float4 x, float4 y); +int8 __ovld __cnfn isgreaterequal(float8 x, float8 y); +int16 __ovld __cnfn isgreaterequal(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isgreaterequal(double x, double y); +long2 __ovld __cnfn isgreaterequal(double2 x, double2 y); +long3 __ovld __cnfn isgreaterequal(double3 x, double3 y); +long4 __ovld __cnfn isgreaterequal(double4 x, double4 y); +long8 __ovld __cnfn isgreaterequal(double8 x, double8 y); +long16 __ovld __cnfn isgreaterequal(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isgreaterequal(half x, half y); +short2 __ovld __cnfn isgreaterequal(half2 x, half2 y); +short3 __ovld __cnfn isgreaterequal(half3 x, half3 y); +short4 __ovld __cnfn isgreaterequal(half4 x, half4 y); +short8 __ovld __cnfn isgreaterequal(half8 x, half8 y); +short16 __ovld __cnfn isgreaterequal(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of x < y. 
+ */ +int __ovld __cnfn isless(float x, float y); +int2 __ovld __cnfn isless(float2 x, float2 y); +int3 __ovld __cnfn isless(float3 x, float3 y); +int4 __ovld __cnfn isless(float4 x, float4 y); +int8 __ovld __cnfn isless(float8 x, float8 y); +int16 __ovld __cnfn isless(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isless(double x, double y); +long2 __ovld __cnfn isless(double2 x, double2 y); +long3 __ovld __cnfn isless(double3 x, double3 y); +long4 __ovld __cnfn isless(double4 x, double4 y); +long8 __ovld __cnfn isless(double8 x, double8 y); +long16 __ovld __cnfn isless(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isless(half x, half y); +short2 __ovld __cnfn isless(half2 x, half2 y); +short3 __ovld __cnfn isless(half3 x, half3 y); +short4 __ovld __cnfn isless(half4 x, half4 y); +short8 __ovld __cnfn isless(half8 x, half8 y); +short16 __ovld __cnfn isless(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of x <= y. + */ +int __ovld __cnfn islessequal(float x, float y); +int2 __ovld __cnfn islessequal(float2 x, float2 y); +int3 __ovld __cnfn islessequal(float3 x, float3 y); +int4 __ovld __cnfn islessequal(float4 x, float4 y); +int8 __ovld __cnfn islessequal(float8 x, float8 y); +int16 __ovld __cnfn islessequal(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn islessequal(double x, double y); +long2 __ovld __cnfn islessequal(double2 x, double2 y); +long3 __ovld __cnfn islessequal(double3 x, double3 y); +long4 __ovld __cnfn islessequal(double4 x, double4 y); +long8 __ovld __cnfn islessequal(double8 x, double8 y); +long16 __ovld __cnfn islessequal(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn islessequal(half x, half y); +short2 __ovld __cnfn islessequal(half2 x, half2 y); +short3 __ovld __cnfn islessequal(half3 x, half3 y); +short4 __ovld __cnfn islessequal(half4 x, half4 y); +short8 __ovld __cnfn islessequal(half8 x, half8 y); +short16 __ovld __cnfn islessequal(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Returns the component-wise compare of + * (x < y) || (x > y) . + */ +int __ovld __cnfn islessgreater(float x, float y); +int2 __ovld __cnfn islessgreater(float2 x, float2 y); +int3 __ovld __cnfn islessgreater(float3 x, float3 y); +int4 __ovld __cnfn islessgreater(float4 x, float4 y); +int8 __ovld __cnfn islessgreater(float8 x, float8 y); +int16 __ovld __cnfn islessgreater(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn islessgreater(double x, double y); +long2 __ovld __cnfn islessgreater(double2 x, double2 y); +long3 __ovld __cnfn islessgreater(double3 x, double3 y); +long4 __ovld __cnfn islessgreater(double4 x, double4 y); +long8 __ovld __cnfn islessgreater(double8 x, double8 y); +long16 __ovld __cnfn islessgreater(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn islessgreater(half x, half y); +short2 __ovld __cnfn islessgreater(half2 x, half2 y); +short3 __ovld __cnfn islessgreater(half3 x, half3 y); +short4 __ovld __cnfn islessgreater(half4 x, half4 y); +short8 __ovld __cnfn islessgreater(half8 x, half8 y); +short16 __ovld __cnfn islessgreater(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Test for finite value. 
+ */ +int __ovld __cnfn isfinite(float); +int2 __ovld __cnfn isfinite(float2); +int3 __ovld __cnfn isfinite(float3); +int4 __ovld __cnfn isfinite(float4); +int8 __ovld __cnfn isfinite(float8); +int16 __ovld __cnfn isfinite(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn isfinite(double); +long2 __ovld __cnfn isfinite(double2); +long3 __ovld __cnfn isfinite(double3); +long4 __ovld __cnfn isfinite(double4); +long8 __ovld __cnfn isfinite(double8); +long16 __ovld __cnfn isfinite(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isfinite(half); +short2 __ovld __cnfn isfinite(half2); +short3 __ovld __cnfn isfinite(half3); +short4 __ovld __cnfn isfinite(half4); +short8 __ovld __cnfn isfinite(half8); +short16 __ovld __cnfn isfinite(half16); +#endif //cl_khr_fp16 + +/** + * Test for infinity value (+ve or -ve) . + */ +int __ovld __cnfn isinf(float); +int2 __ovld __cnfn isinf(float2); +int3 __ovld __cnfn isinf(float3); +int4 __ovld __cnfn isinf(float4); +int8 __ovld __cnfn isinf(float8); +int16 __ovld __cnfn isinf(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn isinf(double); +long2 __ovld __cnfn isinf(double2); +long3 __ovld __cnfn isinf(double3); +long4 __ovld __cnfn isinf(double4); +long8 __ovld __cnfn isinf(double8); +long16 __ovld __cnfn isinf(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isinf(half); +short2 __ovld __cnfn isinf(half2); +short3 __ovld __cnfn isinf(half3); +short4 __ovld __cnfn isinf(half4); +short8 __ovld __cnfn isinf(half8); +short16 __ovld __cnfn isinf(half16); +#endif //cl_khr_fp16 + +/** + * Test for a NaN. + */ +int __ovld __cnfn isnan(float); +int2 __ovld __cnfn isnan(float2); +int3 __ovld __cnfn isnan(float3); +int4 __ovld __cnfn isnan(float4); +int8 __ovld __cnfn isnan(float8); +int16 __ovld __cnfn isnan(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn isnan(double); +long2 __ovld __cnfn isnan(double2); +long3 __ovld __cnfn isnan(double3); +long4 __ovld __cnfn isnan(double4); +long8 __ovld __cnfn isnan(double8); +long16 __ovld __cnfn isnan(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isnan(half); +short2 __ovld __cnfn isnan(half2); +short3 __ovld __cnfn isnan(half3); +short4 __ovld __cnfn isnan(half4); +short8 __ovld __cnfn isnan(half8); +short16 __ovld __cnfn isnan(half16); +#endif //cl_khr_fp16 + +/** + * Test for a normal value. + */ +int __ovld __cnfn isnormal(float); +int2 __ovld __cnfn isnormal(float2); +int3 __ovld __cnfn isnormal(float3); +int4 __ovld __cnfn isnormal(float4); +int8 __ovld __cnfn isnormal(float8); +int16 __ovld __cnfn isnormal(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn isnormal(double); +long2 __ovld __cnfn isnormal(double2); +long3 __ovld __cnfn isnormal(double3); +long4 __ovld __cnfn isnormal(double4); +long8 __ovld __cnfn isnormal(double8); +long16 __ovld __cnfn isnormal(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isnormal(half); +short2 __ovld __cnfn isnormal(half2); +short3 __ovld __cnfn isnormal(half3); +short4 __ovld __cnfn isnormal(half4); +short8 __ovld __cnfn isnormal(half8); +short16 __ovld __cnfn isnormal(half16); +#endif //cl_khr_fp16 + +/** + * Test if arguments are ordered. isordered() takes + * arguments x and y, and returns the result + * isequal(x, x) && isequal(y, y). 
+ */ +int __ovld __cnfn isordered(float x, float y); +int2 __ovld __cnfn isordered(float2 x, float2 y); +int3 __ovld __cnfn isordered(float3 x, float3 y); +int4 __ovld __cnfn isordered(float4 x, float4 y); +int8 __ovld __cnfn isordered(float8 x, float8 y); +int16 __ovld __cnfn isordered(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isordered(double x, double y); +long2 __ovld __cnfn isordered(double2 x, double2 y); +long3 __ovld __cnfn isordered(double3 x, double3 y); +long4 __ovld __cnfn isordered(double4 x, double4 y); +long8 __ovld __cnfn isordered(double8 x, double8 y); +long16 __ovld __cnfn isordered(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isordered(half x, half y); +short2 __ovld __cnfn isordered(half2 x, half2 y); +short3 __ovld __cnfn isordered(half3 x, half3 y); +short4 __ovld __cnfn isordered(half4 x, half4 y); +short8 __ovld __cnfn isordered(half8 x, half8 y); +short16 __ovld __cnfn isordered(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Test if arguments are unordered. isunordered() + * takes arguments x and y, returning non-zero if x or y + * is NaN, and zero otherwise. + */ +int __ovld __cnfn isunordered(float x, float y); +int2 __ovld __cnfn isunordered(float2 x, float2 y); +int3 __ovld __cnfn isunordered(float3 x, float3 y); +int4 __ovld __cnfn isunordered(float4 x, float4 y); +int8 __ovld __cnfn isunordered(float8 x, float8 y); +int16 __ovld __cnfn isunordered(float16 x, float16 y); +#ifdef cl_khr_fp64 +int __ovld __cnfn isunordered(double x, double y); +long2 __ovld __cnfn isunordered(double2 x, double2 y); +long3 __ovld __cnfn isunordered(double3 x, double3 y); +long4 __ovld __cnfn isunordered(double4 x, double4 y); +long8 __ovld __cnfn isunordered(double8 x, double8 y); +long16 __ovld __cnfn isunordered(double16 x, double16 y); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn isunordered(half x, half y); +short2 __ovld __cnfn isunordered(half2 x, half2 y); +short3 __ovld __cnfn isunordered(half3 x, half3 y); +short4 __ovld __cnfn isunordered(half4 x, half4 y); +short8 __ovld __cnfn isunordered(half8 x, half8 y); +short16 __ovld __cnfn isunordered(half16 x, half16 y); +#endif //cl_khr_fp16 + +/** + * Test for sign bit. The scalar version of the function + * returns a 1 if the sign bit in the float is set else returns + * 0. The vector version of the function returns the + * following for each component in floatn: a -1 if the + * sign bit in the float is set else returns 0. + */ +int __ovld __cnfn signbit(float); +int2 __ovld __cnfn signbit(float2); +int3 __ovld __cnfn signbit(float3); +int4 __ovld __cnfn signbit(float4); +int8 __ovld __cnfn signbit(float8); +int16 __ovld __cnfn signbit(float16); +#ifdef cl_khr_fp64 +int __ovld __cnfn signbit(double); +long2 __ovld __cnfn signbit(double2); +long3 __ovld __cnfn signbit(double3); +long4 __ovld __cnfn signbit(double4); +long8 __ovld __cnfn signbit(double8); +long16 __ovld __cnfn signbit(double16); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +int __ovld __cnfn signbit(half); +short2 __ovld __cnfn signbit(half2); +short3 __ovld __cnfn signbit(half3); +short4 __ovld __cnfn signbit(half4); +short8 __ovld __cnfn signbit(half8); +short16 __ovld __cnfn signbit(half16); +#endif //cl_khr_fp16 + +/** + * Returns 1 if the most significant bit in any component + * of x is set; otherwise returns 0. 
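 *
 * Editorial illustration (not upstream header text; values are arbitrary):
 * because the vector relational functions above set all bits of a component
 * when the test passes, their results compose directly with any()/all():
 *   any(isless((float4)(1.0f, 5.0f, 1.0f, 1.0f), (float4)(2.0f)))  == 1
 *   all(isless((float4)(1.0f, 5.0f, 1.0f, 1.0f), (float4)(2.0f)))  == 0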
+ */ +int __ovld __cnfn any(char x); +int __ovld __cnfn any(char2 x); +int __ovld __cnfn any(char3 x); +int __ovld __cnfn any(char4 x); +int __ovld __cnfn any(char8 x); +int __ovld __cnfn any(char16 x); +int __ovld __cnfn any(short x); +int __ovld __cnfn any(short2 x); +int __ovld __cnfn any(short3 x); +int __ovld __cnfn any(short4 x); +int __ovld __cnfn any(short8 x); +int __ovld __cnfn any(short16 x); +int __ovld __cnfn any(int x); +int __ovld __cnfn any(int2 x); +int __ovld __cnfn any(int3 x); +int __ovld __cnfn any(int4 x); +int __ovld __cnfn any(int8 x); +int __ovld __cnfn any(int16 x); +int __ovld __cnfn any(long x); +int __ovld __cnfn any(long2 x); +int __ovld __cnfn any(long3 x); +int __ovld __cnfn any(long4 x); +int __ovld __cnfn any(long8 x); +int __ovld __cnfn any(long16 x); + +/** + * Returns 1 if the most significant bit in all components + * of x is set; otherwise returns 0. + */ +int __ovld __cnfn all(char x); +int __ovld __cnfn all(char2 x); +int __ovld __cnfn all(char3 x); +int __ovld __cnfn all(char4 x); +int __ovld __cnfn all(char8 x); +int __ovld __cnfn all(char16 x); +int __ovld __cnfn all(short x); +int __ovld __cnfn all(short2 x); +int __ovld __cnfn all(short3 x); +int __ovld __cnfn all(short4 x); +int __ovld __cnfn all(short8 x); +int __ovld __cnfn all(short16 x); +int __ovld __cnfn all(int x); +int __ovld __cnfn all(int2 x); +int __ovld __cnfn all(int3 x); +int __ovld __cnfn all(int4 x); +int __ovld __cnfn all(int8 x); +int __ovld __cnfn all(int16 x); +int __ovld __cnfn all(long x); +int __ovld __cnfn all(long2 x); +int __ovld __cnfn all(long3 x); +int __ovld __cnfn all(long4 x); +int __ovld __cnfn all(long8 x); +int __ovld __cnfn all(long16 x); + +/** + * Each bit of the result is the corresponding bit of a if + * the corresponding bit of c is 0. Otherwise it is the + * corresponding bit of b. 
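 *
 * Editorial worked example (not upstream header text): for integer types
 * this is equivalent to (a & ~c) | (b & c), so
 *   bitselect((uchar)0x0F, (uchar)0xF0, (uchar)0x33) == 0x3C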
+ */ +char __ovld __cnfn bitselect(char a, char b, char c); +uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c); +char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c); +uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c); +char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c); +uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c); +char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c); +uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c); +char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c); +uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c); +char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c); +uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c); +short __ovld __cnfn bitselect(short a, short b, short c); +ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c); +short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c); +ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c); +short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c); +ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c); +short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c); +ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c); +short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c); +ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c); +short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c); +ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c); +int __ovld __cnfn bitselect(int a, int b, int c); +uint __ovld __cnfn bitselect(uint a, uint b, uint c); +int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c); +uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c); +int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c); +uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c); +int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c); +uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c); +int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c); +uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c); +int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c); +uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c); +long __ovld __cnfn bitselect(long a, long b, long c); +ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c); +long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c); +ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c); +long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c); +ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c); +long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c); +ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c); +long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c); +ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c); +long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c); +ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c); +float __ovld __cnfn bitselect(float a, float b, float c); +float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c); +float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c); +float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c); +float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c); +float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c); +#ifdef cl_khr_fp64 +double __ovld __cnfn bitselect(double a, double b, double c); +double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c); +double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c); +double4 __ovld __cnfn 
bitselect(double4 a, double4 b, double4 c); +double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c); +double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn bitselect(half a, half b, half c); +half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c); +half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c); +half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c); +half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c); +half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c); +#endif //cl_khr_fp16 + +/** + * For each component of a vector type, + * result[i] = if MSB of c[i] is set ? b[i] : a[i]. + * For a scalar type, result = c ? b : a. + */ +char __ovld __cnfn select(char a, char b, char c); +uchar __ovld __cnfn select(uchar a, uchar b, char c); +char2 __ovld __cnfn select(char2 a, char2 b, char2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c); +char3 __ovld __cnfn select(char3 a, char3 b, char3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c); +char4 __ovld __cnfn select(char4 a, char4 b, char4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c); +char8 __ovld __cnfn select(char8 a, char8 b, char8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c); +char16 __ovld __cnfn select(char16 a, char16 b, char16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c); +short __ovld __cnfn select(short a, short b, char c); +ushort __ovld __cnfn select(ushort a, ushort b, char c); +short2 __ovld __cnfn select(short2 a, short2 b, char2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, char2 c); +short3 __ovld __cnfn select(short3 a, short3 b, char3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, char3 c); +short4 __ovld __cnfn select(short4 a, short4 b, char4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, char4 c); +short8 __ovld __cnfn select(short8 a, short8 b, char8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, char8 c); +short16 __ovld __cnfn select(short16 a, short16 b, char16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, char16 c); +int __ovld __cnfn select(int a, int b, char c); +uint __ovld __cnfn select(uint a, uint b, char c); +int2 __ovld __cnfn select(int2 a, int2 b, char2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, char2 c); +int3 __ovld __cnfn select(int3 a, int3 b, char3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, char3 c); +int4 __ovld __cnfn select(int4 a, int4 b, char4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, char4 c); +int8 __ovld __cnfn select(int8 a, int8 b, char8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, char8 c); +int16 __ovld __cnfn select(int16 a, int16 b, char16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, char16 c); +long __ovld __cnfn select(long a, long b, char c); +ulong __ovld __cnfn select(ulong a, ulong b, char c); +long2 __ovld __cnfn select(long2 a, long2 b, char2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, char2 c); +long3 __ovld __cnfn select(long3 a, long3 b, char3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, char3 c); +long4 __ovld __cnfn select(long4 a, long4 b, char4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, char4 c); +long8 __ovld __cnfn select(long8 a, long8 b, char8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, char8 c); +long16 __ovld __cnfn select(long16 a, long16 b, char16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, char16 c); +float __ovld __cnfn select(float a, float b, 
char c); +float2 __ovld __cnfn select(float2 a, float2 b, char2 c); +float3 __ovld __cnfn select(float3 a, float3 b, char3 c); +float4 __ovld __cnfn select(float4 a, float4 b, char4 c); +float8 __ovld __cnfn select(float8 a, float8 b, char8 c); +float16 __ovld __cnfn select(float16 a, float16 b, char16 c); +char __ovld __cnfn select(char a, char b, short c); +uchar __ovld __cnfn select(uchar a, uchar b, short c); +char2 __ovld __cnfn select(char2 a, char2 b, short2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, short2 c); +char3 __ovld __cnfn select(char3 a, char3 b, short3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, short3 c); +char4 __ovld __cnfn select(char4 a, char4 b, short4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, short4 c); +char8 __ovld __cnfn select(char8 a, char8 b, short8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, short8 c); +char16 __ovld __cnfn select(char16 a, char16 b, short16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, short16 c); +short __ovld __cnfn select(short a, short b, short c); +ushort __ovld __cnfn select(ushort a, ushort b, short c); +short2 __ovld __cnfn select(short2 a, short2 b, short2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c); +short3 __ovld __cnfn select(short3 a, short3 b, short3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c); +short4 __ovld __cnfn select(short4 a, short4 b, short4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c); +short8 __ovld __cnfn select(short8 a, short8 b, short8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c); +short16 __ovld __cnfn select(short16 a, short16 b, short16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c); +int __ovld __cnfn select(int a, int b, short c); +uint __ovld __cnfn select(uint a, uint b, short c); +int2 __ovld __cnfn select(int2 a, int2 b, short2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, short2 c); +int3 __ovld __cnfn select(int3 a, int3 b, short3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, short3 c); +int4 __ovld __cnfn select(int4 a, int4 b, short4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, short4 c); +int8 __ovld __cnfn select(int8 a, int8 b, short8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, short8 c); +int16 __ovld __cnfn select(int16 a, int16 b, short16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, short16 c); +long __ovld __cnfn select(long a, long b, short c); +ulong __ovld __cnfn select(ulong a, ulong b, short c); +long2 __ovld __cnfn select(long2 a, long2 b, short2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, short2 c); +long3 __ovld __cnfn select(long3 a, long3 b, short3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, short3 c); +long4 __ovld __cnfn select(long4 a, long4 b, short4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, short4 c); +long8 __ovld __cnfn select(long8 a, long8 b, short8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, short8 c); +long16 __ovld __cnfn select(long16 a, long16 b, short16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, short16 c); +float __ovld __cnfn select(float a, float b, short c); +float2 __ovld __cnfn select(float2 a, float2 b, short2 c); +float3 __ovld __cnfn select(float3 a, float3 b, short3 c); +float4 __ovld __cnfn select(float4 a, float4 b, short4 c); +float8 __ovld __cnfn select(float8 a, float8 b, short8 c); +float16 __ovld __cnfn select(float16 a, float16 b, short16 c); +char __ovld __cnfn select(char a, char b, int c); +uchar __ovld __cnfn 
select(uchar a, uchar b, int c); +char2 __ovld __cnfn select(char2 a, char2 b, int2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, int2 c); +char3 __ovld __cnfn select(char3 a, char3 b, int3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, int3 c); +char4 __ovld __cnfn select(char4 a, char4 b, int4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, int4 c); +char8 __ovld __cnfn select(char8 a, char8 b, int8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, int8 c); +char16 __ovld __cnfn select(char16 a, char16 b, int16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, int16 c); +short __ovld __cnfn select(short a, short b, int c); +ushort __ovld __cnfn select(ushort a, ushort b, int c); +short2 __ovld __cnfn select(short2 a, short2 b, int2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, int2 c); +short3 __ovld __cnfn select(short3 a, short3 b, int3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, int3 c); +short4 __ovld __cnfn select(short4 a, short4 b, int4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, int4 c); +short8 __ovld __cnfn select(short8 a, short8 b, int8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, int8 c); +short16 __ovld __cnfn select(short16 a, short16 b, int16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, int16 c); +int __ovld __cnfn select(int a, int b, int c); +uint __ovld __cnfn select(uint a, uint b, int c); +int2 __ovld __cnfn select(int2 a, int2 b, int2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c); +int3 __ovld __cnfn select(int3 a, int3 b, int3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c); +int4 __ovld __cnfn select(int4 a, int4 b, int4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c); +int8 __ovld __cnfn select(int8 a, int8 b, int8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c); +int16 __ovld __cnfn select(int16 a, int16 b, int16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c); +long __ovld __cnfn select(long a, long b, int c); +ulong __ovld __cnfn select(ulong a, ulong b, int c); +long2 __ovld __cnfn select(long2 a, long2 b, int2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, int2 c); +long3 __ovld __cnfn select(long3 a, long3 b, int3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, int3 c); +long4 __ovld __cnfn select(long4 a, long4 b, int4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, int4 c); +long8 __ovld __cnfn select(long8 a, long8 b, int8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, int8 c); +long16 __ovld __cnfn select(long16 a, long16 b, int16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, int16 c); +float __ovld __cnfn select(float a, float b, int c); +float2 __ovld __cnfn select(float2 a, float2 b, int2 c); +float3 __ovld __cnfn select(float3 a, float3 b, int3 c); +float4 __ovld __cnfn select(float4 a, float4 b, int4 c); +float8 __ovld __cnfn select(float8 a, float8 b, int8 c); +float16 __ovld __cnfn select(float16 a, float16 b, int16 c); +char __ovld __cnfn select(char a, char b, long c); +uchar __ovld __cnfn select(uchar a, uchar b, long c); +char2 __ovld __cnfn select(char2 a, char2 b, long2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, long2 c); +char3 __ovld __cnfn select(char3 a, char3 b, long3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, long3 c); +char4 __ovld __cnfn select(char4 a, char4 b, long4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, long4 c); +char8 __ovld __cnfn select(char8 a, char8 b, long8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, long8 c); 
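/*
 * Editorial illustration (not upstream header text; values are arbitrary):
 * for the vector forms only the most significant bit of each component of c
 * is examined, so any value with the sign/MSB set selects b:
 *   select((int4)(1), (int4)(2), (int4)(0, -1, 1, INT_MIN)) == (int4)(1, 2, 1, 2)
 * The scalar form instead tests the whole value: result = c ? b : a.
 */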
+char16 __ovld __cnfn select(char16 a, char16 b, long16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, long16 c); +short __ovld __cnfn select(short a, short b, long c); +ushort __ovld __cnfn select(ushort a, ushort b, long c); +short2 __ovld __cnfn select(short2 a, short2 b, long2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, long2 c); +short3 __ovld __cnfn select(short3 a, short3 b, long3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, long3 c); +short4 __ovld __cnfn select(short4 a, short4 b, long4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, long4 c); +short8 __ovld __cnfn select(short8 a, short8 b, long8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, long8 c); +short16 __ovld __cnfn select(short16 a, short16 b, long16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, long16 c); +int __ovld __cnfn select(int a, int b, long c); +uint __ovld __cnfn select(uint a, uint b, long c); +int2 __ovld __cnfn select(int2 a, int2 b, long2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, long2 c); +int3 __ovld __cnfn select(int3 a, int3 b, long3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, long3 c); +int4 __ovld __cnfn select(int4 a, int4 b, long4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, long4 c); +int8 __ovld __cnfn select(int8 a, int8 b, long8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, long8 c); +int16 __ovld __cnfn select(int16 a, int16 b, long16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, long16 c); +long __ovld __cnfn select(long a, long b, long c); +ulong __ovld __cnfn select(ulong a, ulong b, long c); +long2 __ovld __cnfn select(long2 a, long2 b, long2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c); +long3 __ovld __cnfn select(long3 a, long3 b, long3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c); +long4 __ovld __cnfn select(long4 a, long4 b, long4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c); +long8 __ovld __cnfn select(long8 a, long8 b, long8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c); +long16 __ovld __cnfn select(long16 a, long16 b, long16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c); +float __ovld __cnfn select(float a, float b, long c); +float2 __ovld __cnfn select(float2 a, float2 b, long2 c); +float3 __ovld __cnfn select(float3 a, float3 b, long3 c); +float4 __ovld __cnfn select(float4 a, float4 b, long4 c); +float8 __ovld __cnfn select(float8 a, float8 b, long8 c); +float16 __ovld __cnfn select(float16 a, float16 b, long16 c); +char __ovld __cnfn select(char a, char b, uchar c); +uchar __ovld __cnfn select(uchar a, uchar b, uchar c); +char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c); +char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c); +char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c); +char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c); +char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c); +short __ovld __cnfn select(short a, short b, uchar c); +ushort __ovld __cnfn select(ushort a, ushort b, uchar c); +short2 __ovld __cnfn select(short2 a, short2 b, uchar2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, uchar2 c); +short3 __ovld __cnfn select(short3 a, short3 b, uchar3 c); 
+ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, uchar3 c); +short4 __ovld __cnfn select(short4 a, short4 b, uchar4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, uchar4 c); +short8 __ovld __cnfn select(short8 a, short8 b, uchar8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, uchar8 c); +short16 __ovld __cnfn select(short16 a, short16 b, uchar16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, uchar16 c); +int __ovld __cnfn select(int a, int b, uchar c); +uint __ovld __cnfn select(uint a, uint b, uchar c); +int2 __ovld __cnfn select(int2 a, int2 b, uchar2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, uchar2 c); +int3 __ovld __cnfn select(int3 a, int3 b, uchar3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, uchar3 c); +int4 __ovld __cnfn select(int4 a, int4 b, uchar4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, uchar4 c); +int8 __ovld __cnfn select(int8 a, int8 b, uchar8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, uchar8 c); +int16 __ovld __cnfn select(int16 a, int16 b, uchar16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, uchar16 c); +long __ovld __cnfn select(long a, long b, uchar c); +ulong __ovld __cnfn select(ulong a, ulong b, uchar c); +long2 __ovld __cnfn select(long2 a, long2 b, uchar2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, uchar2 c); +long3 __ovld __cnfn select(long3 a, long3 b, uchar3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, uchar3 c); +long4 __ovld __cnfn select(long4 a, long4 b, uchar4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, uchar4 c); +long8 __ovld __cnfn select(long8 a, long8 b, uchar8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, uchar8 c); +long16 __ovld __cnfn select(long16 a, long16 b, uchar16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, uchar16 c); +float __ovld __cnfn select(float a, float b, uchar c); +float2 __ovld __cnfn select(float2 a, float2 b, uchar2 c); +float3 __ovld __cnfn select(float3 a, float3 b, uchar3 c); +float4 __ovld __cnfn select(float4 a, float4 b, uchar4 c); +float8 __ovld __cnfn select(float8 a, float8 b, uchar8 c); +float16 __ovld __cnfn select(float16 a, float16 b, uchar16 c); +char __ovld __cnfn select(char a, char b, ushort c); +uchar __ovld __cnfn select(uchar a, uchar b, ushort c); +char2 __ovld __cnfn select(char2 a, char2 b, ushort2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, ushort2 c); +char3 __ovld __cnfn select(char3 a, char3 b, ushort3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, ushort3 c); +char4 __ovld __cnfn select(char4 a, char4 b, ushort4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, ushort4 c); +char8 __ovld __cnfn select(char8 a, char8 b, ushort8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, ushort8 c); +char16 __ovld __cnfn select(char16 a, char16 b, ushort16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, ushort16 c); +short __ovld __cnfn select(short a, short b, ushort c); +ushort __ovld __cnfn select(ushort a, ushort b, ushort c); +short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c); +short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c); +short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c); +short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c); +short16 __ovld __cnfn select(short16 a, short16 b, 
ushort16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c); +int __ovld __cnfn select(int a, int b, ushort c); +uint __ovld __cnfn select(uint a, uint b, ushort c); +int2 __ovld __cnfn select(int2 a, int2 b, ushort2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, ushort2 c); +int3 __ovld __cnfn select(int3 a, int3 b, ushort3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, ushort3 c); +int4 __ovld __cnfn select(int4 a, int4 b, ushort4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, ushort4 c); +int8 __ovld __cnfn select(int8 a, int8 b, ushort8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, ushort8 c); +int16 __ovld __cnfn select(int16 a, int16 b, ushort16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, ushort16 c); +long __ovld __cnfn select(long a, long b, ushort c); +ulong __ovld __cnfn select(ulong a, ulong b, ushort c); +long2 __ovld __cnfn select(long2 a, long2 b, ushort2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ushort2 c); +long3 __ovld __cnfn select(long3 a, long3 b, ushort3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ushort3 c); +long4 __ovld __cnfn select(long4 a, long4 b, ushort4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ushort4 c); +long8 __ovld __cnfn select(long8 a, long8 b, ushort8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ushort8 c); +long16 __ovld __cnfn select(long16 a, long16 b, ushort16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ushort16 c); +float __ovld __cnfn select(float a, float b, ushort c); +float2 __ovld __cnfn select(float2 a, float2 b, ushort2 c); +float3 __ovld __cnfn select(float3 a, float3 b, ushort3 c); +float4 __ovld __cnfn select(float4 a, float4 b, ushort4 c); +float8 __ovld __cnfn select(float8 a, float8 b, ushort8 c); +float16 __ovld __cnfn select(float16 a, float16 b, ushort16 c); +char __ovld __cnfn select(char a, char b, uint c); +uchar __ovld __cnfn select(uchar a, uchar b, uint c); +char2 __ovld __cnfn select(char2 a, char2 b, uint2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uint2 c); +char3 __ovld __cnfn select(char3 a, char3 b, uint3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uint3 c); +char4 __ovld __cnfn select(char4 a, char4 b, uint4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uint4 c); +char8 __ovld __cnfn select(char8 a, char8 b, uint8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uint8 c); +char16 __ovld __cnfn select(char16 a, char16 b, uint16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uint16 c); +short __ovld __cnfn select(short a, short b, uint c); +ushort __ovld __cnfn select(ushort a, ushort b, uint c); +short2 __ovld __cnfn select(short2 a, short2 b, uint2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, uint2 c); +short3 __ovld __cnfn select(short3 a, short3 b, uint3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, uint3 c); +short4 __ovld __cnfn select(short4 a, short4 b, uint4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, uint4 c); +short8 __ovld __cnfn select(short8 a, short8 b, uint8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, uint8 c); +short16 __ovld __cnfn select(short16 a, short16 b, uint16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, uint16 c); +int __ovld __cnfn select(int a, int b, uint c); +uint __ovld __cnfn select(uint a, uint b, uint c); +int2 __ovld __cnfn select(int2 a, int2 b, uint2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c); +int3 __ovld __cnfn select(int3 a, int3 b, uint3 c); +uint3 __ovld __cnfn select(uint3 a, 
uint3 b, uint3 c); +int4 __ovld __cnfn select(int4 a, int4 b, uint4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c); +int8 __ovld __cnfn select(int8 a, int8 b, uint8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c); +int16 __ovld __cnfn select(int16 a, int16 b, uint16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c); +long __ovld __cnfn select(long a, long b, uint c); +ulong __ovld __cnfn select(ulong a, ulong b, uint c); +long2 __ovld __cnfn select(long2 a, long2 b, uint2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, uint2 c); +long3 __ovld __cnfn select(long3 a, long3 b, uint3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, uint3 c); +long4 __ovld __cnfn select(long4 a, long4 b, uint4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, uint4 c); +long8 __ovld __cnfn select(long8 a, long8 b, uint8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, uint8 c); +long16 __ovld __cnfn select(long16 a, long16 b, uint16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, uint16 c); +float __ovld __cnfn select(float a, float b, uint c); +float2 __ovld __cnfn select(float2 a, float2 b, uint2 c); +float3 __ovld __cnfn select(float3 a, float3 b, uint3 c); +float4 __ovld __cnfn select(float4 a, float4 b, uint4 c); +float8 __ovld __cnfn select(float8 a, float8 b, uint8 c); +float16 __ovld __cnfn select(float16 a, float16 b, uint16 c); +char __ovld __cnfn select(char a, char b, ulong c); +uchar __ovld __cnfn select(uchar a, uchar b, ulong c); +char2 __ovld __cnfn select(char2 a, char2 b, ulong2 c); +uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, ulong2 c); +char3 __ovld __cnfn select(char3 a, char3 b, ulong3 c); +uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, ulong3 c); +char4 __ovld __cnfn select(char4 a, char4 b, ulong4 c); +uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, ulong4 c); +char8 __ovld __cnfn select(char8 a, char8 b, ulong8 c); +uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, ulong8 c); +char16 __ovld __cnfn select(char16 a, char16 b, ulong16 c); +uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, ulong16 c); +short __ovld __cnfn select(short a, short b, ulong c); +ushort __ovld __cnfn select(ushort a, ushort b, ulong c); +short2 __ovld __cnfn select(short2 a, short2 b, ulong2 c); +ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ulong2 c); +short3 __ovld __cnfn select(short3 a, short3 b, ulong3 c); +ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ulong3 c); +short4 __ovld __cnfn select(short4 a, short4 b, ulong4 c); +ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ulong4 c); +short8 __ovld __cnfn select(short8 a, short8 b, ulong8 c); +ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ulong8 c); +short16 __ovld __cnfn select(short16 a, short16 b, ulong16 c); +ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ulong16 c); +int __ovld __cnfn select(int a, int b, ulong c); +uint __ovld __cnfn select(uint a, uint b, ulong c); +int2 __ovld __cnfn select(int2 a, int2 b, ulong2 c); +uint2 __ovld __cnfn select(uint2 a, uint2 b, ulong2 c); +int3 __ovld __cnfn select(int3 a, int3 b, ulong3 c); +uint3 __ovld __cnfn select(uint3 a, uint3 b, ulong3 c); +int4 __ovld __cnfn select(int4 a, int4 b, ulong4 c); +uint4 __ovld __cnfn select(uint4 a, uint4 b, ulong4 c); +int8 __ovld __cnfn select(int8 a, int8 b, ulong8 c); +uint8 __ovld __cnfn select(uint8 a, uint8 b, ulong8 c); +int16 __ovld __cnfn select(int16 a, int16 b, ulong16 c); +uint16 __ovld __cnfn select(uint16 a, uint16 b, ulong16 c); +long __ovld __cnfn select(long a, long b, ulong 
c); +ulong __ovld __cnfn select(ulong a, ulong b, ulong c); +long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c); +ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c); +long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c); +ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c); +long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c); +ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c); +long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c); +ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c); +long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c); +ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c); +float __ovld __cnfn select(float a, float b, ulong c); +float2 __ovld __cnfn select(float2 a, float2 b, ulong2 c); +float3 __ovld __cnfn select(float3 a, float3 b, ulong3 c); +float4 __ovld __cnfn select(float4 a, float4 b, ulong4 c); +float8 __ovld __cnfn select(float8 a, float8 b, ulong8 c); +float16 __ovld __cnfn select(float16 a, float16 b, ulong16 c); +#ifdef cl_khr_fp64 +double __ovld __cnfn select(double a, double b, long c); +double2 __ovld __cnfn select(double2 a, double2 b, long2 c); +double3 __ovld __cnfn select(double3 a, double3 b, long3 c); +double4 __ovld __cnfn select(double4 a, double4 b, long4 c); +double8 __ovld __cnfn select(double8 a, double8 b, long8 c); +double16 __ovld __cnfn select(double16 a, double16 b, long16 c); +double __ovld __cnfn select(double a, double b, ulong c); +double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c); +double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c); +double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c); +double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c); +double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +half __ovld __cnfn select(half a, half b, short c); +half2 __ovld __cnfn select(half2 a, half2 b, short2 c); +half3 __ovld __cnfn select(half3 a, half3 b, short3 c); +half4 __ovld __cnfn select(half4 a, half4 b, short4 c); +half8 __ovld __cnfn select(half8 a, half8 b, short8 c); +half16 __ovld __cnfn select(half16 a, half16 b, short16 c); +half __ovld __cnfn select(half a, half b, ushort c); +half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c); +half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c); +half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c); +half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c); +half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c); +#endif //cl_khr_fp16 + +// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions +// OpenCL extensions v1.1 s9.6.6, v1.2 s9.5.6, v2.0 s9.4.6 - Vector Data Load and Store Functions for Half Type +/** + * Use generic type gentype to indicate the built-in data types + * char, uchar, short, ushort, int, uint, long, ulong, float, + * double or half. + * + * vloadn return sizeof (gentypen) bytes of data read from address (p + (offset * n)). + * + * vstoren write sizeof (gentypen) bytes given by data to address (p + (offset * n)). + * + * The address computed as (p + (offset * n)) must be + * 8-bit aligned if gentype is char, uchar; + * 16-bit aligned if gentype is short, ushort, half; + * 32-bit aligned if gentype is int, uint, float; + * 64-bit aligned if gentype is long, ulong, double. 
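 *
 * Editorial sketch (not upstream header text; kernel and parameter names are
 * illustrative): because vloadn/vstoren only require element alignment, they
 * are the portable way to move vectors through buffers that are not aligned
 * to the full vector size. For example, scaling an xyz triple per work-item:
 *
 *   __kernel void scale3(__global const float *src, __global float *dst) {
 *       size_t i = get_global_id(0);
 *       float3 v = vload3(i, src);       // reads from src + i * 3
 *       vstore3(v * 2.0f, i, dst);       // writes to  dst + i * 3
 *   }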
+ */ + +char2 __ovld vload2(size_t offset, const __constant char *p); +uchar2 __ovld vload2(size_t offset, const __constant uchar *p); +short2 __ovld vload2(size_t offset, const __constant short *p); +ushort2 __ovld vload2(size_t offset, const __constant ushort *p); +int2 __ovld vload2(size_t offset, const __constant int *p); +uint2 __ovld vload2(size_t offset, const __constant uint *p); +long2 __ovld vload2(size_t offset, const __constant long *p); +ulong2 __ovld vload2(size_t offset, const __constant ulong *p); +float2 __ovld vload2(size_t offset, const __constant float *p); +char3 __ovld vload3(size_t offset, const __constant char *p); +uchar3 __ovld vload3(size_t offset, const __constant uchar *p); +short3 __ovld vload3(size_t offset, const __constant short *p); +ushort3 __ovld vload3(size_t offset, const __constant ushort *p); +int3 __ovld vload3(size_t offset, const __constant int *p); +uint3 __ovld vload3(size_t offset, const __constant uint *p); +long3 __ovld vload3(size_t offset, const __constant long *p); +ulong3 __ovld vload3(size_t offset, const __constant ulong *p); +float3 __ovld vload3(size_t offset, const __constant float *p); +char4 __ovld vload4(size_t offset, const __constant char *p); +uchar4 __ovld vload4(size_t offset, const __constant uchar *p); +short4 __ovld vload4(size_t offset, const __constant short *p); +ushort4 __ovld vload4(size_t offset, const __constant ushort *p); +int4 __ovld vload4(size_t offset, const __constant int *p); +uint4 __ovld vload4(size_t offset, const __constant uint *p); +long4 __ovld vload4(size_t offset, const __constant long *p); +ulong4 __ovld vload4(size_t offset, const __constant ulong *p); +float4 __ovld vload4(size_t offset, const __constant float *p); +char8 __ovld vload8(size_t offset, const __constant char *p); +uchar8 __ovld vload8(size_t offset, const __constant uchar *p); +short8 __ovld vload8(size_t offset, const __constant short *p); +ushort8 __ovld vload8(size_t offset, const __constant ushort *p); +int8 __ovld vload8(size_t offset, const __constant int *p); +uint8 __ovld vload8(size_t offset, const __constant uint *p); +long8 __ovld vload8(size_t offset, const __constant long *p); +ulong8 __ovld vload8(size_t offset, const __constant ulong *p); +float8 __ovld vload8(size_t offset, const __constant float *p); +char16 __ovld vload16(size_t offset, const __constant char *p); +uchar16 __ovld vload16(size_t offset, const __constant uchar *p); +short16 __ovld vload16(size_t offset, const __constant short *p); +ushort16 __ovld vload16(size_t offset, const __constant ushort *p); +int16 __ovld vload16(size_t offset, const __constant int *p); +uint16 __ovld vload16(size_t offset, const __constant uint *p); +long16 __ovld vload16(size_t offset, const __constant long *p); +ulong16 __ovld vload16(size_t offset, const __constant ulong *p); +float16 __ovld vload16(size_t offset, const __constant float *p); +#ifdef cl_khr_fp64 +double2 __ovld vload2(size_t offset, const __constant double *p); +double3 __ovld vload3(size_t offset, const __constant double *p); +double4 __ovld vload4(size_t offset, const __constant double *p); +double8 __ovld vload8(size_t offset, const __constant double *p); +double16 __ovld vload16(size_t offset, const __constant double *p); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half __ovld vload(size_t offset, const __constant half *p); +half2 __ovld vload2(size_t offset, const __constant half *p); +half3 __ovld vload3(size_t offset, const __constant half *p); +half4 __ovld vload4(size_t offset, const __constant 
half *p); +half8 __ovld vload8(size_t offset, const __constant half *p); +half16 __ovld vload16(size_t offset, const __constant half *p); +#endif //cl_khr_fp16 + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +char2 __ovld vload2(size_t offset, const char *p); +uchar2 __ovld vload2(size_t offset, const uchar *p); +short2 __ovld vload2(size_t offset, const short *p); +ushort2 __ovld vload2(size_t offset, const ushort *p); +int2 __ovld vload2(size_t offset, const int *p); +uint2 __ovld vload2(size_t offset, const uint *p); +long2 __ovld vload2(size_t offset, const long *p); +ulong2 __ovld vload2(size_t offset, const ulong *p); +float2 __ovld vload2(size_t offset, const float *p); +char3 __ovld vload3(size_t offset, const char *p); +uchar3 __ovld vload3(size_t offset, const uchar *p); +short3 __ovld vload3(size_t offset, const short *p); +ushort3 __ovld vload3(size_t offset, const ushort *p); +int3 __ovld vload3(size_t offset, const int *p); +uint3 __ovld vload3(size_t offset, const uint *p); +long3 __ovld vload3(size_t offset, const long *p); +ulong3 __ovld vload3(size_t offset, const ulong *p); +float3 __ovld vload3(size_t offset, const float *p); +char4 __ovld vload4(size_t offset, const char *p); +uchar4 __ovld vload4(size_t offset, const uchar *p); +short4 __ovld vload4(size_t offset, const short *p); +ushort4 __ovld vload4(size_t offset, const ushort *p); +int4 __ovld vload4(size_t offset, const int *p); +uint4 __ovld vload4(size_t offset, const uint *p); +long4 __ovld vload4(size_t offset, const long *p); +ulong4 __ovld vload4(size_t offset, const ulong *p); +float4 __ovld vload4(size_t offset, const float *p); +char8 __ovld vload8(size_t offset, const char *p); +uchar8 __ovld vload8(size_t offset, const uchar *p); +short8 __ovld vload8(size_t offset, const short *p); +ushort8 __ovld vload8(size_t offset, const ushort *p); +int8 __ovld vload8(size_t offset, const int *p); +uint8 __ovld vload8(size_t offset, const uint *p); +long8 __ovld vload8(size_t offset, const long *p); +ulong8 __ovld vload8(size_t offset, const ulong *p); +float8 __ovld vload8(size_t offset, const float *p); +char16 __ovld vload16(size_t offset, const char *p); +uchar16 __ovld vload16(size_t offset, const uchar *p); +short16 __ovld vload16(size_t offset, const short *p); +ushort16 __ovld vload16(size_t offset, const ushort *p); +int16 __ovld vload16(size_t offset, const int *p); +uint16 __ovld vload16(size_t offset, const uint *p); +long16 __ovld vload16(size_t offset, const long *p); +ulong16 __ovld vload16(size_t offset, const ulong *p); +float16 __ovld vload16(size_t offset, const float *p); + +#ifdef cl_khr_fp64 +double2 __ovld vload2(size_t offset, const double *p); +double3 __ovld vload3(size_t offset, const double *p); +double4 __ovld vload4(size_t offset, const double *p); +double8 __ovld vload8(size_t offset, const double *p); +double16 __ovld vload16(size_t offset, const double *p); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half __ovld vload(size_t offset, const half *p); +half2 __ovld vload2(size_t offset, const half *p); +half3 __ovld vload3(size_t offset, const half *p); +half4 __ovld vload4(size_t offset, const half *p); +half8 __ovld vload8(size_t offset, const half *p); +half16 __ovld vload16(size_t offset, const half *p); +#endif //cl_khr_fp16 +#else +char2 __ovld vload2(size_t offset, const __global char *p); +uchar2 __ovld vload2(size_t offset, const __global uchar *p); +short2 __ovld vload2(size_t offset, const __global short *p); +ushort2 __ovld vload2(size_t offset, const __global ushort *p); 
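/*
 * Editorial note with a small usage sketch (not upstream header text; kernel
 * and parameter names are illustrative): before OpenCL 2.0 there is no
 * generic address space, so this branch repeats each overload for __global,
 * __local and __private pointers. In either branch only element alignment of
 * p is required, e.g.
 *
 *   __kernel void copy4(__global const float *src, __global float *dst) {
 *       size_t i = get_global_id(0);
 *       vstore4(vload4(i, src), i, dst);  // needs 4-byte, not 16-byte, alignment
 *   }
 */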
+int2 __ovld vload2(size_t offset, const __global int *p); +uint2 __ovld vload2(size_t offset, const __global uint *p); +long2 __ovld vload2(size_t offset, const __global long *p); +ulong2 __ovld vload2(size_t offset, const __global ulong *p); +float2 __ovld vload2(size_t offset, const __global float *p); +char3 __ovld vload3(size_t offset, const __global char *p); +uchar3 __ovld vload3(size_t offset, const __global uchar *p); +short3 __ovld vload3(size_t offset, const __global short *p); +ushort3 __ovld vload3(size_t offset, const __global ushort *p); +int3 __ovld vload3(size_t offset, const __global int *p); +uint3 __ovld vload3(size_t offset, const __global uint *p); +long3 __ovld vload3(size_t offset, const __global long *p); +ulong3 __ovld vload3(size_t offset, const __global ulong *p); +float3 __ovld vload3(size_t offset, const __global float *p); +char4 __ovld vload4(size_t offset, const __global char *p); +uchar4 __ovld vload4(size_t offset, const __global uchar *p); +short4 __ovld vload4(size_t offset, const __global short *p); +ushort4 __ovld vload4(size_t offset, const __global ushort *p); +int4 __ovld vload4(size_t offset, const __global int *p); +uint4 __ovld vload4(size_t offset, const __global uint *p); +long4 __ovld vload4(size_t offset, const __global long *p); +ulong4 __ovld vload4(size_t offset, const __global ulong *p); +float4 __ovld vload4(size_t offset, const __global float *p); +char8 __ovld vload8(size_t offset, const __global char *p); +uchar8 __ovld vload8(size_t offset, const __global uchar *p); +short8 __ovld vload8(size_t offset, const __global short *p); +ushort8 __ovld vload8(size_t offset, const __global ushort *p); +int8 __ovld vload8(size_t offset, const __global int *p); +uint8 __ovld vload8(size_t offset, const __global uint *p); +long8 __ovld vload8(size_t offset, const __global long *p); +ulong8 __ovld vload8(size_t offset, const __global ulong *p); +float8 __ovld vload8(size_t offset, const __global float *p); +char16 __ovld vload16(size_t offset, const __global char *p); +uchar16 __ovld vload16(size_t offset, const __global uchar *p); +short16 __ovld vload16(size_t offset, const __global short *p); +ushort16 __ovld vload16(size_t offset, const __global ushort *p); +int16 __ovld vload16(size_t offset, const __global int *p); +uint16 __ovld vload16(size_t offset, const __global uint *p); +long16 __ovld vload16(size_t offset, const __global long *p); +ulong16 __ovld vload16(size_t offset, const __global ulong *p); +float16 __ovld vload16(size_t offset, const __global float *p); +char2 __ovld vload2(size_t offset, const __local char *p); +uchar2 __ovld vload2(size_t offset, const __local uchar *p); +short2 __ovld vload2(size_t offset, const __local short *p); +ushort2 __ovld vload2(size_t offset, const __local ushort *p); +int2 __ovld vload2(size_t offset, const __local int *p); +uint2 __ovld vload2(size_t offset, const __local uint *p); +long2 __ovld vload2(size_t offset, const __local long *p); +ulong2 __ovld vload2(size_t offset, const __local ulong *p); +float2 __ovld vload2(size_t offset, const __local float *p); +char3 __ovld vload3(size_t offset, const __local char *p); +uchar3 __ovld vload3(size_t offset, const __local uchar *p); +short3 __ovld vload3(size_t offset, const __local short *p); +ushort3 __ovld vload3(size_t offset, const __local ushort *p); +int3 __ovld vload3(size_t offset, const __local int *p); +uint3 __ovld vload3(size_t offset, const __local uint *p); +long3 __ovld vload3(size_t offset, const __local long *p); +ulong3 __ovld 
vload3(size_t offset, const __local ulong *p); +float3 __ovld vload3(size_t offset, const __local float *p); +char4 __ovld vload4(size_t offset, const __local char *p); +uchar4 __ovld vload4(size_t offset, const __local uchar *p); +short4 __ovld vload4(size_t offset, const __local short *p); +ushort4 __ovld vload4(size_t offset, const __local ushort *p); +int4 __ovld vload4(size_t offset, const __local int *p); +uint4 __ovld vload4(size_t offset, const __local uint *p); +long4 __ovld vload4(size_t offset, const __local long *p); +ulong4 __ovld vload4(size_t offset, const __local ulong *p); +float4 __ovld vload4(size_t offset, const __local float *p); +char8 __ovld vload8(size_t offset, const __local char *p); +uchar8 __ovld vload8(size_t offset, const __local uchar *p); +short8 __ovld vload8(size_t offset, const __local short *p); +ushort8 __ovld vload8(size_t offset, const __local ushort *p); +int8 __ovld vload8(size_t offset, const __local int *p); +uint8 __ovld vload8(size_t offset, const __local uint *p); +long8 __ovld vload8(size_t offset, const __local long *p); +ulong8 __ovld vload8(size_t offset, const __local ulong *p); +float8 __ovld vload8(size_t offset, const __local float *p); +char16 __ovld vload16(size_t offset, const __local char *p); +uchar16 __ovld vload16(size_t offset, const __local uchar *p); +short16 __ovld vload16(size_t offset, const __local short *p); +ushort16 __ovld vload16(size_t offset, const __local ushort *p); +int16 __ovld vload16(size_t offset, const __local int *p); +uint16 __ovld vload16(size_t offset, const __local uint *p); +long16 __ovld vload16(size_t offset, const __local long *p); +ulong16 __ovld vload16(size_t offset, const __local ulong *p); +float16 __ovld vload16(size_t offset, const __local float *p); +char2 __ovld vload2(size_t offset, const __private char *p); +uchar2 __ovld vload2(size_t offset, const __private uchar *p); +short2 __ovld vload2(size_t offset, const __private short *p); +ushort2 __ovld vload2(size_t offset, const __private ushort *p); +int2 __ovld vload2(size_t offset, const __private int *p); +uint2 __ovld vload2(size_t offset, const __private uint *p); +long2 __ovld vload2(size_t offset, const __private long *p); +ulong2 __ovld vload2(size_t offset, const __private ulong *p); +float2 __ovld vload2(size_t offset, const __private float *p); +char3 __ovld vload3(size_t offset, const __private char *p); +uchar3 __ovld vload3(size_t offset, const __private uchar *p); +short3 __ovld vload3(size_t offset, const __private short *p); +ushort3 __ovld vload3(size_t offset, const __private ushort *p); +int3 __ovld vload3(size_t offset, const __private int *p); +uint3 __ovld vload3(size_t offset, const __private uint *p); +long3 __ovld vload3(size_t offset, const __private long *p); +ulong3 __ovld vload3(size_t offset, const __private ulong *p); +float3 __ovld vload3(size_t offset, const __private float *p); +char4 __ovld vload4(size_t offset, const __private char *p); +uchar4 __ovld vload4(size_t offset, const __private uchar *p); +short4 __ovld vload4(size_t offset, const __private short *p); +ushort4 __ovld vload4(size_t offset, const __private ushort *p); +int4 __ovld vload4(size_t offset, const __private int *p); +uint4 __ovld vload4(size_t offset, const __private uint *p); +long4 __ovld vload4(size_t offset, const __private long *p); +ulong4 __ovld vload4(size_t offset, const __private ulong *p); +float4 __ovld vload4(size_t offset, const __private float *p); +char8 __ovld vload8(size_t offset, const __private char *p); +uchar8 
__ovld vload8(size_t offset, const __private uchar *p); +short8 __ovld vload8(size_t offset, const __private short *p); +ushort8 __ovld vload8(size_t offset, const __private ushort *p); +int8 __ovld vload8(size_t offset, const __private int *p); +uint8 __ovld vload8(size_t offset, const __private uint *p); +long8 __ovld vload8(size_t offset, const __private long *p); +ulong8 __ovld vload8(size_t offset, const __private ulong *p); +float8 __ovld vload8(size_t offset, const __private float *p); +char16 __ovld vload16(size_t offset, const __private char *p); +uchar16 __ovld vload16(size_t offset, const __private uchar *p); +short16 __ovld vload16(size_t offset, const __private short *p); +ushort16 __ovld vload16(size_t offset, const __private ushort *p); +int16 __ovld vload16(size_t offset, const __private int *p); +uint16 __ovld vload16(size_t offset, const __private uint *p); +long16 __ovld vload16(size_t offset, const __private long *p); +ulong16 __ovld vload16(size_t offset, const __private ulong *p); +float16 __ovld vload16(size_t offset, const __private float *p); + +#ifdef cl_khr_fp64 +double2 __ovld vload2(size_t offset, const __global double *p); +double3 __ovld vload3(size_t offset, const __global double *p); +double4 __ovld vload4(size_t offset, const __global double *p); +double8 __ovld vload8(size_t offset, const __global double *p); +double16 __ovld vload16(size_t offset, const __global double *p); +double2 __ovld vload2(size_t offset, const __local double *p); +double3 __ovld vload3(size_t offset, const __local double *p); +double4 __ovld vload4(size_t offset, const __local double *p); +double8 __ovld vload8(size_t offset, const __local double *p); +double16 __ovld vload16(size_t offset, const __local double *p); +double2 __ovld vload2(size_t offset, const __private double *p); +double3 __ovld vload3(size_t offset, const __private double *p); +double4 __ovld vload4(size_t offset, const __private double *p); +double8 __ovld vload8(size_t offset, const __private double *p); +double16 __ovld vload16(size_t offset, const __private double *p); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half __ovld vload(size_t offset, const __global half *p); +half2 __ovld vload2(size_t offset, const __global half *p); +half3 __ovld vload3(size_t offset, const __global half *p); +half4 __ovld vload4(size_t offset, const __global half *p); +half8 __ovld vload8(size_t offset, const __global half *p); +half16 __ovld vload16(size_t offset, const __global half *p); +half __ovld vload(size_t offset, const __local half *p); +half2 __ovld vload2(size_t offset, const __local half *p); +half3 __ovld vload3(size_t offset, const __local half *p); +half4 __ovld vload4(size_t offset, const __local half *p); +half8 __ovld vload8(size_t offset, const __local half *p); +half16 __ovld vload16(size_t offset, const __local half *p); +half __ovld vload(size_t offset, const __private half *p); +half2 __ovld vload2(size_t offset, const __private half *p); +half3 __ovld vload3(size_t offset, const __private half *p); +half4 __ovld vload4(size_t offset, const __private half *p); +half8 __ovld vload8(size_t offset, const __private half *p); +half16 __ovld vload16(size_t offset, const __private half *p); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +void __ovld vstore2(char2 data, size_t offset, char *p); +void __ovld vstore2(uchar2 data, size_t offset, uchar *p); +void __ovld vstore2(short2 data, size_t offset, short *p); +void __ovld vstore2(ushort2 
data, size_t offset, ushort *p); +void __ovld vstore2(int2 data, size_t offset, int *p); +void __ovld vstore2(uint2 data, size_t offset, uint *p); +void __ovld vstore2(long2 data, size_t offset, long *p); +void __ovld vstore2(ulong2 data, size_t offset, ulong *p); +void __ovld vstore2(float2 data, size_t offset, float *p); +void __ovld vstore3(char3 data, size_t offset, char *p); +void __ovld vstore3(uchar3 data, size_t offset, uchar *p); +void __ovld vstore3(short3 data, size_t offset, short *p); +void __ovld vstore3(ushort3 data, size_t offset, ushort *p); +void __ovld vstore3(int3 data, size_t offset, int *p); +void __ovld vstore3(uint3 data, size_t offset, uint *p); +void __ovld vstore3(long3 data, size_t offset, long *p); +void __ovld vstore3(ulong3 data, size_t offset, ulong *p); +void __ovld vstore3(float3 data, size_t offset, float *p); +void __ovld vstore4(char4 data, size_t offset, char *p); +void __ovld vstore4(uchar4 data, size_t offset, uchar *p); +void __ovld vstore4(short4 data, size_t offset, short *p); +void __ovld vstore4(ushort4 data, size_t offset, ushort *p); +void __ovld vstore4(int4 data, size_t offset, int *p); +void __ovld vstore4(uint4 data, size_t offset, uint *p); +void __ovld vstore4(long4 data, size_t offset, long *p); +void __ovld vstore4(ulong4 data, size_t offset, ulong *p); +void __ovld vstore4(float4 data, size_t offset, float *p); +void __ovld vstore8(char8 data, size_t offset, char *p); +void __ovld vstore8(uchar8 data, size_t offset, uchar *p); +void __ovld vstore8(short8 data, size_t offset, short *p); +void __ovld vstore8(ushort8 data, size_t offset, ushort *p); +void __ovld vstore8(int8 data, size_t offset, int *p); +void __ovld vstore8(uint8 data, size_t offset, uint *p); +void __ovld vstore8(long8 data, size_t offset, long *p); +void __ovld vstore8(ulong8 data, size_t offset, ulong *p); +void __ovld vstore8(float8 data, size_t offset, float *p); +void __ovld vstore16(char16 data, size_t offset, char *p); +void __ovld vstore16(uchar16 data, size_t offset, uchar *p); +void __ovld vstore16(short16 data, size_t offset, short *p); +void __ovld vstore16(ushort16 data, size_t offset, ushort *p); +void __ovld vstore16(int16 data, size_t offset, int *p); +void __ovld vstore16(uint16 data, size_t offset, uint *p); +void __ovld vstore16(long16 data, size_t offset, long *p); +void __ovld vstore16(ulong16 data, size_t offset, ulong *p); +void __ovld vstore16(float16 data, size_t offset, float *p); +#ifdef cl_khr_fp64 +void __ovld vstore2(double2 data, size_t offset, double *p); +void __ovld vstore3(double3 data, size_t offset, double *p); +void __ovld vstore4(double4 data, size_t offset, double *p); +void __ovld vstore8(double8 data, size_t offset, double *p); +void __ovld vstore16(double16 data, size_t offset, double *p); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +void __ovld vstore(half data, size_t offset, half *p); +void __ovld vstore2(half2 data, size_t offset, half *p); +void __ovld vstore3(half3 data, size_t offset, half *p); +void __ovld vstore4(half4 data, size_t offset, half *p); +void __ovld vstore8(half8 data, size_t offset, half *p); +void __ovld vstore16(half16 data, size_t offset, half *p); +#endif //cl_khr_fp16 +#else +void __ovld vstore2(char2 data, size_t offset, __global char *p); +void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p); +void __ovld vstore2(short2 data, size_t offset, __global short *p); +void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p); +void __ovld vstore2(int2 data, size_t offset, __global 
int *p); +void __ovld vstore2(uint2 data, size_t offset, __global uint *p); +void __ovld vstore2(long2 data, size_t offset, __global long *p); +void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p); +void __ovld vstore2(float2 data, size_t offset, __global float *p); +void __ovld vstore3(char3 data, size_t offset, __global char *p); +void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p); +void __ovld vstore3(short3 data, size_t offset, __global short *p); +void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p); +void __ovld vstore3(int3 data, size_t offset, __global int *p); +void __ovld vstore3(uint3 data, size_t offset, __global uint *p); +void __ovld vstore3(long3 data, size_t offset, __global long *p); +void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p); +void __ovld vstore3(float3 data, size_t offset, __global float *p); +void __ovld vstore4(char4 data, size_t offset, __global char *p); +void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p); +void __ovld vstore4(short4 data, size_t offset, __global short *p); +void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p); +void __ovld vstore4(int4 data, size_t offset, __global int *p); +void __ovld vstore4(uint4 data, size_t offset, __global uint *p); +void __ovld vstore4(long4 data, size_t offset, __global long *p); +void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p); +void __ovld vstore4(float4 data, size_t offset, __global float *p); +void __ovld vstore8(char8 data, size_t offset, __global char *p); +void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p); +void __ovld vstore8(short8 data, size_t offset, __global short *p); +void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p); +void __ovld vstore8(int8 data, size_t offset, __global int *p); +void __ovld vstore8(uint8 data, size_t offset, __global uint *p); +void __ovld vstore8(long8 data, size_t offset, __global long *p); +void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p); +void __ovld vstore8(float8 data, size_t offset, __global float *p); +void __ovld vstore16(char16 data, size_t offset, __global char *p); +void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p); +void __ovld vstore16(short16 data, size_t offset, __global short *p); +void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p); +void __ovld vstore16(int16 data, size_t offset, __global int *p); +void __ovld vstore16(uint16 data, size_t offset, __global uint *p); +void __ovld vstore16(long16 data, size_t offset, __global long *p); +void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p); +void __ovld vstore16(float16 data, size_t offset, __global float *p); +void __ovld vstore2(char2 data, size_t offset, __local char *p); +void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p); +void __ovld vstore2(short2 data, size_t offset, __local short *p); +void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p); +void __ovld vstore2(int2 data, size_t offset, __local int *p); +void __ovld vstore2(uint2 data, size_t offset, __local uint *p); +void __ovld vstore2(long2 data, size_t offset, __local long *p); +void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p); +void __ovld vstore2(float2 data, size_t offset, __local float *p); +void __ovld vstore3(char3 data, size_t offset, __local char *p); +void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p); +void __ovld vstore3(short3 data, size_t offset, __local short 
*p); +void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p); +void __ovld vstore3(int3 data, size_t offset, __local int *p); +void __ovld vstore3(uint3 data, size_t offset, __local uint *p); +void __ovld vstore3(long3 data, size_t offset, __local long *p); +void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p); +void __ovld vstore3(float3 data, size_t offset, __local float *p); +void __ovld vstore4(char4 data, size_t offset, __local char *p); +void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p); +void __ovld vstore4(short4 data, size_t offset, __local short *p); +void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p); +void __ovld vstore4(int4 data, size_t offset, __local int *p); +void __ovld vstore4(uint4 data, size_t offset, __local uint *p); +void __ovld vstore4(long4 data, size_t offset, __local long *p); +void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p); +void __ovld vstore4(float4 data, size_t offset, __local float *p); +void __ovld vstore8(char8 data, size_t offset, __local char *p); +void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p); +void __ovld vstore8(short8 data, size_t offset, __local short *p); +void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p); +void __ovld vstore8(int8 data, size_t offset, __local int *p); +void __ovld vstore8(uint8 data, size_t offset, __local uint *p); +void __ovld vstore8(long8 data, size_t offset, __local long *p); +void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p); +void __ovld vstore8(float8 data, size_t offset, __local float *p); +void __ovld vstore16(char16 data, size_t offset, __local char *p); +void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p); +void __ovld vstore16(short16 data, size_t offset, __local short *p); +void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p); +void __ovld vstore16(int16 data, size_t offset, __local int *p); +void __ovld vstore16(uint16 data, size_t offset, __local uint *p); +void __ovld vstore16(long16 data, size_t offset, __local long *p); +void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p); +void __ovld vstore16(float16 data, size_t offset, __local float *p); +void __ovld vstore2(char2 data, size_t offset, __private char *p); +void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p); +void __ovld vstore2(short2 data, size_t offset, __private short *p); +void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p); +void __ovld vstore2(int2 data, size_t offset, __private int *p); +void __ovld vstore2(uint2 data, size_t offset, __private uint *p); +void __ovld vstore2(long2 data, size_t offset, __private long *p); +void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p); +void __ovld vstore2(float2 data, size_t offset, __private float *p); +void __ovld vstore3(char3 data, size_t offset, __private char *p); +void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p); +void __ovld vstore3(short3 data, size_t offset, __private short *p); +void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p); +void __ovld vstore3(int3 data, size_t offset, __private int *p); +void __ovld vstore3(uint3 data, size_t offset, __private uint *p); +void __ovld vstore3(long3 data, size_t offset, __private long *p); +void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p); +void __ovld vstore3(float3 data, size_t offset, __private float *p); +void __ovld vstore4(char4 data, size_t offset, __private char *p); +void 
__ovld vstore4(uchar4 data, size_t offset, __private uchar *p); +void __ovld vstore4(short4 data, size_t offset, __private short *p); +void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p); +void __ovld vstore4(int4 data, size_t offset, __private int *p); +void __ovld vstore4(uint4 data, size_t offset, __private uint *p); +void __ovld vstore4(long4 data, size_t offset, __private long *p); +void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p); +void __ovld vstore4(float4 data, size_t offset, __private float *p); +void __ovld vstore8(char8 data, size_t offset, __private char *p); +void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p); +void __ovld vstore8(short8 data, size_t offset, __private short *p); +void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p); +void __ovld vstore8(int8 data, size_t offset, __private int *p); +void __ovld vstore8(uint8 data, size_t offset, __private uint *p); +void __ovld vstore8(long8 data, size_t offset, __private long *p); +void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p); +void __ovld vstore8(float8 data, size_t offset, __private float *p); +void __ovld vstore16(char16 data, size_t offset, __private char *p); +void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p); +void __ovld vstore16(short16 data, size_t offset, __private short *p); +void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p); +void __ovld vstore16(int16 data, size_t offset, __private int *p); +void __ovld vstore16(uint16 data, size_t offset, __private uint *p); +void __ovld vstore16(long16 data, size_t offset, __private long *p); +void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p); +void __ovld vstore16(float16 data, size_t offset, __private float *p); +#ifdef cl_khr_fp64 +void __ovld vstore2(double2 data, size_t offset, __global double *p); +void __ovld vstore3(double3 data, size_t offset, __global double *p); +void __ovld vstore4(double4 data, size_t offset, __global double *p); +void __ovld vstore8(double8 data, size_t offset, __global double *p); +void __ovld vstore16(double16 data, size_t offset, __global double *p); +void __ovld vstore2(double2 data, size_t offset, __local double *p); +void __ovld vstore3(double3 data, size_t offset, __local double *p); +void __ovld vstore4(double4 data, size_t offset, __local double *p); +void __ovld vstore8(double8 data, size_t offset, __local double *p); +void __ovld vstore16(double16 data, size_t offset, __local double *p); +void __ovld vstore2(double2 data, size_t offset, __private double *p); +void __ovld vstore3(double3 data, size_t offset, __private double *p); +void __ovld vstore4(double4 data, size_t offset, __private double *p); +void __ovld vstore8(double8 data, size_t offset, __private double *p); +void __ovld vstore16(double16 data, size_t offset, __private double *p); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +void __ovld vstore(half data, size_t offset, __global half *p); +void __ovld vstore2(half2 data, size_t offset, __global half *p); +void __ovld vstore3(half3 data, size_t offset, __global half *p); +void __ovld vstore4(half4 data, size_t offset, __global half *p); +void __ovld vstore8(half8 data, size_t offset, __global half *p); +void __ovld vstore16(half16 data, size_t offset, __global half *p); +void __ovld vstore(half data, size_t offset, __local half *p); +void __ovld vstore2(half2 data, size_t offset, __local half *p); +void __ovld vstore3(half3 data, size_t offset, __local half *p); +void __ovld 
vstore4(half4 data, size_t offset, __local half *p); +void __ovld vstore8(half8 data, size_t offset, __local half *p); +void __ovld vstore16(half16 data, size_t offset, __local half *p); +void __ovld vstore(half data, size_t offset, __private half *p); +void __ovld vstore2(half2 data, size_t offset, __private half *p); +void __ovld vstore3(half3 data, size_t offset, __private half *p); +void __ovld vstore4(half4 data, size_t offset, __private half *p); +void __ovld vstore8(half8 data, size_t offset, __private half *p); +void __ovld vstore16(half16 data, size_t offset, __private half *p); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Read sizeof (half) bytes of data from address + * (p + offset). The data read is interpreted as a + * half value. The half value is converted to a + * float value and the float value is returned. + * The read address computed as (p + offset) + * must be 16-bit aligned. + */ +float __ovld vload_half(size_t offset, const __constant half *p); +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float __ovld vload_half(size_t offset, const half *p); +#else +float __ovld vload_half(size_t offset, const __global half *p); +float __ovld vload_half(size_t offset, const __local half *p); +float __ovld vload_half(size_t offset, const __private half *p); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Read sizeof (halfn) bytes of data from address + * (p + (offset * n)). The data read is interpreted + * as a halfn value. The halfn value read is + * converted to a floatn value and the floatn + * value is returned. The read address computed + * as (p + (offset * n)) must be 16-bit aligned. + */ +float2 __ovld vload_half2(size_t offset, const __constant half *p); +float3 __ovld vload_half3(size_t offset, const __constant half *p); +float4 __ovld vload_half4(size_t offset, const __constant half *p); +float8 __ovld vload_half8(size_t offset, const __constant half *p); +float16 __ovld vload_half16(size_t offset, const __constant half *p); +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float2 __ovld vload_half2(size_t offset, const half *p); +float3 __ovld vload_half3(size_t offset, const half *p); +float4 __ovld vload_half4(size_t offset, const half *p); +float8 __ovld vload_half8(size_t offset, const half *p); +float16 __ovld vload_half16(size_t offset, const half *p); +#else +float2 __ovld vload_half2(size_t offset, const __global half *p); +float3 __ovld vload_half3(size_t offset, const __global half *p); +float4 __ovld vload_half4(size_t offset, const __global half *p); +float8 __ovld vload_half8(size_t offset, const __global half *p); +float16 __ovld vload_half16(size_t offset, const __global half *p); +float2 __ovld vload_half2(size_t offset, const __local half *p); +float3 __ovld vload_half3(size_t offset, const __local half *p); +float4 __ovld vload_half4(size_t offset, const __local half *p); +float8 __ovld vload_half8(size_t offset, const __local half *p); +float16 __ovld vload_half16(size_t offset, const __local half *p); +float2 __ovld vload_half2(size_t offset, const __private half *p); +float3 __ovld vload_half3(size_t offset, const __private half *p); +float4 __ovld vload_half4(size_t offset, const __private half *p); +float8 __ovld vload_half8(size_t offset, const __private half *p); +float16 __ovld vload_half16(size_t offset, const __private half *p); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * The float value given by data is first + * converted to a half value using the appropriate + * rounding mode. 
The half value is then written + * to the address computed as (p + offset). The + * address computed as (p + offset) must be 16- + * bit aligned. + * vstore_half uses the current rounding mode. + * The default current rounding mode is round to + * nearest even. + */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +void __ovld vstore_half(float data, size_t offset, half *p); +void __ovld vstore_half_rte(float data, size_t offset, half *p); +void __ovld vstore_half_rtz(float data, size_t offset, half *p); +void __ovld vstore_half_rtp(float data, size_t offset, half *p); +void __ovld vstore_half_rtn(float data, size_t offset, half *p); +#ifdef cl_khr_fp64 +void __ovld vstore_half(double data, size_t offset, half *p); +void __ovld vstore_half_rte(double data, size_t offset, half *p); +void __ovld vstore_half_rtz(double data, size_t offset, half *p); +void __ovld vstore_half_rtp(double data, size_t offset, half *p); +void __ovld vstore_half_rtn(double data, size_t offset, half *p); +#endif //cl_khr_fp64 +#else +void __ovld vstore_half(float data, size_t offset, __global half *p); +void __ovld vstore_half_rte(float data, size_t offset, __global half *p); +void __ovld vstore_half_rtz(float data, size_t offset, __global half *p); +void __ovld vstore_half_rtp(float data, size_t offset, __global half *p); +void __ovld vstore_half_rtn(float data, size_t offset, __global half *p); +void __ovld vstore_half(float data, size_t offset, __local half *p); +void __ovld vstore_half_rte(float data, size_t offset, __local half *p); +void __ovld vstore_half_rtz(float data, size_t offset, __local half *p); +void __ovld vstore_half_rtp(float data, size_t offset, __local half *p); +void __ovld vstore_half_rtn(float data, size_t offset, __local half *p); +void __ovld vstore_half(float data, size_t offset, __private half *p); +void __ovld vstore_half_rte(float data, size_t offset, __private half *p); +void __ovld vstore_half_rtz(float data, size_t offset, __private half *p); +void __ovld vstore_half_rtp(float data, size_t offset, __private half *p); +void __ovld vstore_half_rtn(float data, size_t offset, __private half *p); +#ifdef cl_khr_fp64 +void __ovld vstore_half(double data, size_t offset, __global half *p); +void __ovld vstore_half_rte(double data, size_t offset, __global half *p); +void __ovld vstore_half_rtz(double data, size_t offset, __global half *p); +void __ovld vstore_half_rtp(double data, size_t offset, __global half *p); +void __ovld vstore_half_rtn(double data, size_t offset, __global half *p); +void __ovld vstore_half(double data, size_t offset, __local half *p); +void __ovld vstore_half_rte(double data, size_t offset, __local half *p); +void __ovld vstore_half_rtz(double data, size_t offset, __local half *p); +void __ovld vstore_half_rtp(double data, size_t offset, __local half *p); +void __ovld vstore_half_rtn(double data, size_t offset, __local half *p); +void __ovld vstore_half(double data, size_t offset, __private half *p); +void __ovld vstore_half_rte(double data, size_t offset, __private half *p); +void __ovld vstore_half_rtz(double data, size_t offset, __private half *p); +void __ovld vstore_half_rtp(double data, size_t offset, __private half *p); +void __ovld vstore_half_rtn(double data, size_t offset, __private half *p); +#endif //cl_khr_fp64 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * The floatn value given by data is converted to + * a halfn value using the appropriate rounding + * mode. The halfn value is then written to + * the address computed as (p + (offset * n)).
The + * address computed as (p + (offset * n)) must be + * 16-bit aligned. + * vstore_halfn uses the current rounding mode. + * The default current rounding mode is round to + * nearest even. + */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +void __ovld vstore_half2(float2 data, size_t offset, half *p); +void __ovld vstore_half3(float3 data, size_t offset, half *p); +void __ovld vstore_half4(float4 data, size_t offset, half *p); +void __ovld vstore_half8(float8 data, size_t offset, half *p); +void __ovld vstore_half16(float16 data, size_t offset, half *p); +void __ovld vstore_half2_rte(float2 data, size_t offset, half *p); +void __ovld vstore_half3_rte(float3 data, size_t offset, half *p); +void __ovld vstore_half4_rte(float4 data, size_t offset, half *p); +void __ovld vstore_half8_rte(float8 data, size_t offset, half *p); +void __ovld vstore_half16_rte(float16 data, size_t offset, half *p); +void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p); +void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p); +void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p); +void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p); +void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p); +void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p); +void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p); +void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p); +void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p); +void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p); +void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p); +void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p); +void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p); +void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p); +void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p); +#ifdef cl_khr_fp64 +void __ovld vstore_half2(double2 data, size_t offset, half *p); +void __ovld vstore_half3(double3 data, size_t offset, half *p); +void __ovld vstore_half4(double4 data, size_t offset, half *p); +void __ovld vstore_half8(double8 data, size_t offset, half *p); +void __ovld vstore_half16(double16 data, size_t offset, half *p); +void __ovld vstore_half2_rte(double2 data, size_t offset, half *p); +void __ovld vstore_half3_rte(double3 data, size_t offset, half *p); +void __ovld vstore_half4_rte(double4 data, size_t offset, half *p); +void __ovld vstore_half8_rte(double8 data, size_t offset, half *p); +void __ovld vstore_half16_rte(double16 data, size_t offset, half *p); +void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p); +void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p); +void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p); +void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p); +void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p); +void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p); +void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p); +void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p); +void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p); +void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p); +void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p); +void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p); +void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p); +void __ovld 
vstore_half8_rtn(double8 data, size_t offset, half *p); +void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p); +#endif //cl_khr_fp64 +#else +void __ovld vstore_half2(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p); +void __ovld vstore_half2(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtp(float16 data, size_t 
offset, __local half *p); +void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p); +void __ovld vstore_half2(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16(float16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p); +#ifdef cl_khr_fp64 +void __ovld vstore_half2(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16(double16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p); +void __ovld 
vstore_half2_rtp(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p); +void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p); +void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p); +void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p); +void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p); +void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p); +void __ovld vstore_half2(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p); +void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p); +void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p); +void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p); +void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p); +void __ovld vstore_half2(double2 data, size_t offset, __private half *p); +void __ovld vstore_half3(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16(double16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p); +void __ovld 
vstore_half3_rtz(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p); +void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p); +void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p); +void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p); +void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p); +void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p); +#endif //cl_khr_fp64 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * For n = 1, 2, 4, 8 and 16 read sizeof (halfn) + * bytes of data from address (p + (offset * n)). + * The data read is interpreted as a halfn value. + * The halfn value read is converted to a floatn + * value and the floatn value is returned. + * The address computed as (p + (offset * n)) + * must be aligned to sizeof (halfn) bytes. + * For n = 3, vloada_half3 reads a half3 from + * address (p + (offset * 4)) and returns a float3. + * The address computed as (p + (offset * 4)) + * must be aligned to sizeof (half) * 4 bytes. + */ +float __ovld vloada_half(size_t offset, const __constant half *p); +float2 __ovld vloada_half2(size_t offset, const __constant half *p); +float3 __ovld vloada_half3(size_t offset, const __constant half *p); +float4 __ovld vloada_half4(size_t offset, const __constant half *p); +float8 __ovld vloada_half8(size_t offset, const __constant half *p); +float16 __ovld vloada_half16(size_t offset, const __constant half *p); +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float __ovld vloada_half(size_t offset, const half *p); +float2 __ovld vloada_half2(size_t offset, const half *p); +float3 __ovld vloada_half3(size_t offset, const half *p); +float4 __ovld vloada_half4(size_t offset, const half *p); +float8 __ovld vloada_half8(size_t offset, const half *p); +float16 __ovld vloada_half16(size_t offset, const half *p); +#else +float __ovld vloada_half(size_t offset, const __global half *p); +float2 __ovld vloada_half2(size_t offset, const __global half *p); +float3 __ovld vloada_half3(size_t offset, const __global half *p); +float4 __ovld vloada_half4(size_t offset, const __global half *p); +float8 __ovld vloada_half8(size_t offset, const __global half *p); +float16 __ovld vloada_half16(size_t offset, const __global half *p); +float __ovld vloada_half(size_t offset, const __local half *p); +float2 __ovld vloada_half2(size_t offset, const __local half *p); +float3 __ovld vloada_half3(size_t offset, const __local half *p); +float4 __ovld vloada_half4(size_t offset, const __local half *p); +float8 __ovld vloada_half8(size_t offset, const __local half *p); +float16 __ovld vloada_half16(size_t offset, const __local half *p); +float __ovld vloada_half(size_t offset, const __private half *p); +float2 __ovld vloada_half2(size_t offset, const __private half *p); +float3 __ovld vloada_half3(size_t offset, const __private half *p); 
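/*
 * Usage illustration (an editorial sketch, not part of the header being added
 * here): the vload_half / vloada_half families above convert 16-bit half
 * storage to float on load, and the matching vstore_half / vstorea_half
 * forms convert back on store, so no fp16 arithmetic is required. The kernel
 * and parameter names below are invented for the example.
 *
 *   __kernel void scale_half_buffer(__global const half *src,
 *                                   __global half *dst, float factor) {
 *       size_t i = get_global_id(0);
 *       float4 v = vload_half4(i, src);   // reads 4 halfs at src + i * 4 as a float4
 *       vstore_half4(v * factor, i, dst); // rounds with the current mode (RTE by default)
 *   }
 */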
+float4 __ovld vloada_half4(size_t offset, const __private half *p); +float8 __ovld vloada_half8(size_t offset, const __private half *p); +float16 __ovld vloada_half16(size_t offset, const __private half *p); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * The floatn value given by data is converted to + * a halfn value using the appropriate rounding + * mode. + * For n = 1, 2, 4, 8 and 16, the halfn value is + * written to the address computed as (p + (offset + * * n)). The address computed as (p + (offset * + * n)) must be aligned to sizeof (halfn) bytes. + * For n = 3, the half3 value is written to the + * address computed as (p + (offset * 4)). The + * address computed as (p + (offset * 4)) must be + * aligned to sizeof (half) * 4 bytes. + * vstorea_halfn uses the current rounding + * mode. The default current rounding mode is + * round to nearest even. + */ +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +void __ovld vstorea_half(float data, size_t offset, half *p); +void __ovld vstorea_half2(float2 data, size_t offset, half *p); +void __ovld vstorea_half3(float3 data, size_t offset, half *p); +void __ovld vstorea_half4(float4 data, size_t offset, half *p); +void __ovld vstorea_half8(float8 data, size_t offset, half *p); +void __ovld vstorea_half16(float16 data, size_t offset, half *p); + +void __ovld vstorea_half_rte(float data, size_t offset, half *p); +void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p); +void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p); +void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p); +void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p); +void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtz(float data, size_t offset, half *p); +void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtp(float data, size_t offset, half *p); +void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtn(float data, size_t offset, half *p); +void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p); + +#ifdef cl_khr_fp64 +void __ovld vstorea_half(double data, size_t offset, half *p); +void __ovld vstorea_half2(double2 data, size_t offset, half *p); +void __ovld vstorea_half3(double3 data, size_t offset, half *p); +void __ovld vstorea_half4(double4 data, size_t offset, half *p); +void __ovld vstorea_half8(double8 data, size_t offset, half *p); +void __ovld vstorea_half16(double16 data, size_t offset, half *p); + +void __ovld vstorea_half_rte(double data, size_t offset, half *p); +void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p); +void 
__ovld vstorea_half3_rte(double3 data, size_t offset, half *p); +void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p); +void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p); +void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtz(double data, size_t offset, half *p); +void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtp(double data, size_t offset, half *p); +void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p); + +void __ovld vstorea_half_rtn(double data, size_t offset, half *p); +void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p); +void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p); +void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p); +void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p); +void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p); +#endif //cl_khr_fp64 + +#else +void __ovld vstorea_half(float data, size_t offset, __global half *p); +void __ovld vstorea_half2(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rte(float data, size_t offset, __global half *p); +void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtz(float data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtp(float data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtn(float data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtn(float2 data, 
size_t offset, __global half *p); +void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p); + +void __ovld vstorea_half(float data, size_t offset, __local half *p); +void __ovld vstorea_half2(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rte(float data, size_t offset, __local half *p); +void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtz(float data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtp(float data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtn(float data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p); + +void __ovld vstorea_half(float data, size_t offset, __private half *p); +void __ovld vstorea_half2(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16(float16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rte(float data, size_t offset, __private half *p); +void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rte(float16 data, size_t offset, 
__private half *p); + +void __ovld vstorea_half_rtz(float data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rtp(float data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rtn(float data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p); + +#ifdef cl_khr_fp64 +void __ovld vstorea_half(double data, size_t offset, __global half *p); +void __ovld vstorea_half2(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rte(double data, size_t offset, __global half *p); +void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtz(double data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtp(double data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p); +void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half_rtn(double data, size_t offset, __global half *p); +void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p); +void __ovld vstorea_half3_rtn(double3 data, 
size_t offset, __global half *p); +void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p); +void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p); +void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p); + +void __ovld vstorea_half(double data, size_t offset, __local half *p); +void __ovld vstorea_half2(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rte(double data, size_t offset, __local half *p); +void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtz(double data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtp(double data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half_rtn(double data, size_t offset, __local half *p); +void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p); +void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p); +void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p); +void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p); +void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p); + +void __ovld vstorea_half(double data, size_t offset, __private half *p); +void __ovld vstorea_half2(double2 data, size_t offset, __private half *p); +void __ovld vstorea_half3(double3 data, size_t offset, __private half *p); +void __ovld vstorea_half4(double4 data, size_t offset, __private half *p); +void __ovld vstorea_half8(double8 data, size_t offset, __private half *p); +void __ovld vstorea_half16(double16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rte(double data, size_t offset, __private half *p); +void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p); + +void __ovld 
vstorea_half_rtz(double data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rtp(double data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p); + +void __ovld vstorea_half_rtn(double data, size_t offset, __private half *p); +void __ovld vstorea_half2_rtn(double2 data, size_t offset, __private half *p); +void __ovld vstorea_half3_rtn(double3 data, size_t offset, __private half *p); +void __ovld vstorea_half4_rtn(double4 data, size_t offset, __private half *p); +void __ovld vstorea_half8_rtn(double8 data, size_t offset, __private half *p); +void __ovld vstorea_half16_rtn(double16 data, size_t offset, __private half *p); +#endif //cl_khr_fp64 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions + +// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence +typedef uint cl_mem_fence_flags; + +/** + * Queue a memory fence to ensure correct + * ordering of memory operations to local memory + */ +#define CLK_LOCAL_MEM_FENCE 0x01 + +/** + * Queue a memory fence to ensure correct + * ordering of memory operations to global memory + */ +#define CLK_GLOBAL_MEM_FENCE 0x02 + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +/** + * Queue a memory fence to ensure correct ordering of memory + * operations between work-items of a work-group to + * image memory. + */ +#define CLK_IMAGE_MEM_FENCE 0x04 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * All work-items in a work-group executing the kernel + * on a processor must execute this function before any + * are allowed to continue execution beyond the barrier. + * This function must be encountered by all work-items in + * a work-group executing the kernel. + * If barrier is inside a conditional statement, then all + * work-items must enter the conditional if any work-item + * enters the conditional statement and executes the + * barrier. + * If barrier is inside a loop, all work-items must execute + * the barrier for each iteration of the loop before any are + * allowed to continue execution beyond the barrier. + * The barrier function also queues a memory fence + * (reads and writes) to ensure correct ordering of + * memory operations to local or global memory. + * The flags argument specifies the memory address space + * and can be set to a combination of the following literal + * values. + * CLK_LOCAL_MEM_FENCE - The barrier function + * will either flush any variables stored in local memory + * or queue a memory fence to ensure correct ordering of + * memory operations to local memory. + * CLK_GLOBAL_MEM_FENCE - The barrier function + * will queue a memory fence to ensure correct ordering + * of memory operations to global memory.
This can be + * useful when work-items, for example, write to buffer or + * image objects and then want to read the updated data. + */ + +void __ovld __conv barrier(cl_mem_fence_flags flags); + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +typedef enum memory_scope +{ + memory_scope_work_item, + memory_scope_work_group, + memory_scope_device, + memory_scope_all_svm_devices, + memory_scope_sub_group +} memory_scope; + +void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope); +void __ovld __conv work_group_barrier(cl_mem_fence_flags flags); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions + +/** + * Orders loads and stores of a work-item + * executing a kernel. This means that loads + * and stores preceding the mem_fence will + * be committed to memory before any loads + * and stores following the mem_fence. + * The flags argument specifies the memory + * address space and can be set to a + * combination of the following literal + * values: + * CLK_LOCAL_MEM_FENCE + * CLK_GLOBAL_MEM_FENCE. + */ +void __ovld mem_fence(cl_mem_fence_flags flags); + +/** + * Read memory barrier that orders only + * loads. + * The flags argument specifies the memory + * address space and can be set to a + * combination of the following literal + * values: + * CLK_LOCAL_MEM_FENCE + * CLK_GLOBAL_MEM_FENCE. + */ +void __ovld read_mem_fence(cl_mem_fence_flags flags); + +/** + * Write memory barrier that orders only + * stores. + * The flags argument specifies the memory + * address space and can be set to a + * combination of the following literal + * values: + * CLK_LOCAL_MEM_FENCE + * CLK_GLOBAL_MEM_FENCE. + */ +void __ovld write_mem_fence(cl_mem_fence_flags flags); + +// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +cl_mem_fence_flags __ovld get_fence(const void *ptr); +cl_mem_fence_flags __ovld get_fence(void *ptr); + +/** + * Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions + * and checked in Sema since they should be declared as + * addr gentype* to_addr (gentype*); + * where gentype is builtin type or user defined type. + */ + +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch + +/** + * event_t async_work_group_copy ( + * __global gentype *dst, + * const __local gentype *src, + * size_t num_elements, + * event_t event) + * Perform an async copy of num_elements + * gentype elements from src to dst. The async + * copy is performed by all work-items in a workgroup + * and this built-in function must therefore + * be encountered by all work-items in a workgroup + * executing the kernel with the same + * argument values; otherwise the results are + * undefined. + * Returns an event object that can be used by + * wait_group_events to wait for the async copy + * to finish. The event argument can also be used + * to associate the async_work_group_copy with + * a previous async copy allowing an event to be + * shared by multiple async copies; otherwise event + * should be zero. + * If event argument is non-zero, the event object + * supplied in event argument will be returned. + * This function does not perform any implicit + * synchronization of source data such as using a + * barrier before performing the copy.
+ */ +event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t 
event); +event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, 
size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 
*src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event); +#ifdef cl_khr_fp64 +event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event); 
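
A minimal usage sketch, not part of the header itself: a hypothetical kernel writes a tile into __local memory, uses barrier(CLK_LOCAL_MEM_FENCE) to make those writes visible (the copy performs no implicit synchronization of its source), then drains the tile back to __global memory with async_work_group_copy and waits on the returned event. TILE and the kernel name are illustrative assumptions; the local work size is assumed to equal TILE.

#define TILE 64                                  /* assumed local work size */
__kernel void square_tile(__global const float *in, __global float *out) {
    __local float tile[TILE];
    size_t lid = get_local_id(0);
    tile[lid] = in[get_global_id(0)] * in[get_global_id(0)];
    /* the copy does not synchronize its source; make the local writes visible first */
    barrier(CLK_LOCAL_MEM_FENCE);
    /* every work-item issues the same call; passing 0 requests a fresh event */
    event_t e = async_work_group_copy(out + get_group_id(0) * TILE, tile, (size_t)TILE, 0);
    wait_group_events(1, &e);                    /* block until the group-wide copy has finished */
}
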
+event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event); +event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event); +#endif //cl_khr_fp16 + +/** + * Perform an async gather of num_elements + * gentype elements from src to dst. The + * src_stride is the stride in elements for each + * gentype element read from src. The dst_stride + * is the stride in elements for each gentype + * element written to dst. The async gather is + * performed by all work-items in a work-group. + * This built-in function must therefore be + * encountered by all work-items in a work-group + * executing the kernel with the same argument + * values; otherwise the results are undefined. + * Returns an event object that can be used by + * wait_group_events to wait for the async copy + * to finish. The event argument can also be used + * to associate the + * async_work_group_strided_copy with a + * previous async copy allowing an event to be + * shared by multiple async copies; otherwise event + * should be zero. + * If event argument is non-zero, the event object + * supplied in event argument will be returned. + * This function does not perform any implicit + * synchronization of source data such as using a + * barrier before performing the copy. 
+ */ +event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, 
size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local short16 
*dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld 
async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, 
size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event); +#ifdef cl_khr_fp64 +event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event); 
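
A second hedged sketch, with ROWS and WIDTH as illustrative assumptions: in the global-to-local direction the stride applies to the source, so the call below gathers one column of a row-major matrix into contiguous __local storage (in the local-to-global direction, dst_stride scatters the writes instead).

#define ROWS 64                                  /* assumed local work size: one element per work-item */
#define WIDTH 1024                               /* assumed row length of the row-major matrix */
__kernel void copy_column(__global const float *matrix, __global float *out, uint column) {
    __local float col[ROWS];
    /* gather matrix[column], matrix[column + WIDTH], matrix[column + 2*WIDTH], ... into col[] */
    event_t e = async_work_group_strided_copy(col, matrix + column, (size_t)ROWS, (size_t)WIDTH, 0);
    wait_group_events(1, &e);
    out[get_global_id(0)] = col[get_local_id(0)];
}
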
+event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event); +event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event); +#endif //cl_khr_fp16 + +/** + * Wait for events that identify the + * async_work_group_copy operations to + * complete. The event objects specified in + * event_list will be released after the wait is + * performed. + * This function must be encountered by all workitems + * in a work-group executing the kernel with + * the same num_events and event objects specified + * in event_list; otherwise the results are undefined. + */ +void __ovld wait_group_events(int num_events, event_t *event_list); + +/** + * Prefetch num_elements * sizeof(gentype) + * bytes into the global cache. The prefetch + * instruction is applied to a work-item in a workgroup + * and does not affect the functional + * behavior of the kernel. 
+ */ +void __ovld prefetch(const __global char *p, size_t num_elements); +void __ovld prefetch(const __global uchar *p, size_t num_elements); +void __ovld prefetch(const __global short *p, size_t num_elements); +void __ovld prefetch(const __global ushort *p, size_t num_elements); +void __ovld prefetch(const __global int *p, size_t num_elements); +void __ovld prefetch(const __global uint *p, size_t num_elements); +void __ovld prefetch(const __global long *p, size_t num_elements); +void __ovld prefetch(const __global ulong *p, size_t num_elements); +void __ovld prefetch(const __global float *p, size_t num_elements); +void __ovld prefetch(const __global char2 *p, size_t num_elements); +void __ovld prefetch(const __global uchar2 *p, size_t num_elements); +void __ovld prefetch(const __global short2 *p, size_t num_elements); +void __ovld prefetch(const __global ushort2 *p, size_t num_elements); +void __ovld prefetch(const __global int2 *p, size_t num_elements); +void __ovld prefetch(const __global uint2 *p, size_t num_elements); +void __ovld prefetch(const __global long2 *p, size_t num_elements); +void __ovld prefetch(const __global ulong2 *p, size_t num_elements); +void __ovld prefetch(const __global float2 *p, size_t num_elements); +void __ovld prefetch(const __global char3 *p, size_t num_elements); +void __ovld prefetch(const __global uchar3 *p, size_t num_elements); +void __ovld prefetch(const __global short3 *p, size_t num_elements); +void __ovld prefetch(const __global ushort3 *p, size_t num_elements); +void __ovld prefetch(const __global int3 *p, size_t num_elements); +void __ovld prefetch(const __global uint3 *p, size_t num_elements); +void __ovld prefetch(const __global long3 *p, size_t num_elements); +void __ovld prefetch(const __global ulong3 *p, size_t num_elements); +void __ovld prefetch(const __global float3 *p, size_t num_elements); +void __ovld prefetch(const __global char4 *p, size_t num_elements); +void __ovld prefetch(const __global uchar4 *p, size_t num_elements); +void __ovld prefetch(const __global short4 *p, size_t num_elements); +void __ovld prefetch(const __global ushort4 *p, size_t num_elements); +void __ovld prefetch(const __global int4 *p, size_t num_elements); +void __ovld prefetch(const __global uint4 *p, size_t num_elements); +void __ovld prefetch(const __global long4 *p, size_t num_elements); +void __ovld prefetch(const __global ulong4 *p, size_t num_elements); +void __ovld prefetch(const __global float4 *p, size_t num_elements); +void __ovld prefetch(const __global char8 *p, size_t num_elements); +void __ovld prefetch(const __global uchar8 *p, size_t num_elements); +void __ovld prefetch(const __global short8 *p, size_t num_elements); +void __ovld prefetch(const __global ushort8 *p, size_t num_elements); +void __ovld prefetch(const __global int8 *p, size_t num_elements); +void __ovld prefetch(const __global uint8 *p, size_t num_elements); +void __ovld prefetch(const __global long8 *p, size_t num_elements); +void __ovld prefetch(const __global ulong8 *p, size_t num_elements); +void __ovld prefetch(const __global float8 *p, size_t num_elements); +void __ovld prefetch(const __global char16 *p, size_t num_elements); +void __ovld prefetch(const __global uchar16 *p, size_t num_elements); +void __ovld prefetch(const __global short16 *p, size_t num_elements); +void __ovld prefetch(const __global ushort16 *p, size_t num_elements); +void __ovld prefetch(const __global int16 *p, size_t num_elements); +void __ovld prefetch(const __global uint16 *p, size_t num_elements); +void 
__ovld prefetch(const __global long16 *p, size_t num_elements); +void __ovld prefetch(const __global ulong16 *p, size_t num_elements); +void __ovld prefetch(const __global float16 *p, size_t num_elements); +#ifdef cl_khr_fp64 +void __ovld prefetch(const __global double *p, size_t num_elements); +void __ovld prefetch(const __global double2 *p, size_t num_elements); +void __ovld prefetch(const __global double3 *p, size_t num_elements); +void __ovld prefetch(const __global double4 *p, size_t num_elements); +void __ovld prefetch(const __global double8 *p, size_t num_elements); +void __ovld prefetch(const __global double16 *p, size_t num_elements); +#endif //cl_khr_fp64 +#ifdef cl_khr_fp16 +void __ovld prefetch(const __global half *p, size_t num_elements); +void __ovld prefetch(const __global half2 *p, size_t num_elements); +void __ovld prefetch(const __global half3 *p, size_t num_elements); +void __ovld prefetch(const __global half4 *p, size_t num_elements); +void __ovld prefetch(const __global half8 *p, size_t num_elements); +void __ovld prefetch(const __global half16 *p, size_t num_elements); +#endif // cl_khr_fp16 + +// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions + +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable +#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable +#endif +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old + val) and store result at location + * pointed by p. The function returns old. + */ +int __ovld atomic_add(volatile __global int *p, int val); +unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_add(volatile __local int *p, int val); +unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val); + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_add(volatile __global int *p, int val); +unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_add(volatile __local int *p, int val); +unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_add(volatile __global long *p, long val); +unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_add(volatile __local long *p, long val); +unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) stored at location pointed by p. + * Compute (old - val) and store result at location pointed by p. The function + * returns old. 
+ */ +int __ovld atomic_sub(volatile __global int *p, int val); +unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_sub(volatile __local int *p, int val); +unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val); + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_sub(volatile __global int *p, int val); +unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_sub(volatile __local int *p, int val); +unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_sub(volatile __global long *p, long val); +unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_sub(volatile __local long *p, long val); +unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Swaps the old value stored at location p + * with new value given by val. Returns old + * value. + */ +int __ovld atomic_xchg(volatile __global int *p, int val); +unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_xchg(volatile __local int *p, int val); +unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val); +float __ovld atomic_xchg(volatile __global float *p, float val); +float __ovld atomic_xchg(volatile __local float *p, float val); + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_xchg(volatile __global int *p, int val); +int __ovld atom_xchg(volatile __local int *p, int val); +#endif +#if defined(cl_khr_local_int32_base_atomics) +unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val); +unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_xchg(volatile __global long *p, long val); +long __ovld atom_xchg(volatile __local long *p, long val); +unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val); +unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old + 1) and store result at location + * pointed by p. The function returns old. + */ +int __ovld atomic_inc(volatile __global int *p); +unsigned int __ovld atomic_inc(volatile __global unsigned int *p); +int __ovld atomic_inc(volatile __local int *p); +unsigned int __ovld atomic_inc(volatile __local unsigned int *p); + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_inc(volatile __global int *p); +unsigned int __ovld atom_inc(volatile __global unsigned int *p); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_inc(volatile __local int *p); +unsigned int __ovld atom_inc(volatile __local unsigned int *p); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_inc(volatile __global long *p); +unsigned long __ovld atom_inc(volatile __global unsigned long *p); +long __ovld atom_inc(volatile __local long *p); +unsigned long __ovld atom_inc(volatile __local unsigned long *p); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old - 1) and store result at location + * pointed by p. The function returns old. 
+ */ +int __ovld atomic_dec(volatile __global int *p); +unsigned int __ovld atomic_dec(volatile __global unsigned int *p); +int __ovld atomic_dec(volatile __local int *p); +unsigned int __ovld atomic_dec(volatile __local unsigned int *p); + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_dec(volatile __global int *p); +unsigned int __ovld atom_dec(volatile __global unsigned int *p); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_dec(volatile __local int *p); +unsigned int __ovld atom_dec(volatile __local unsigned int *p); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_dec(volatile __global long *p); +unsigned long __ovld atom_dec(volatile __global unsigned long *p); +long __ovld atom_dec(volatile __local long *p); +unsigned long __ovld atom_dec(volatile __local unsigned long *p); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old == cmp) ? val : old and store result at + * location pointed by p. The function + * returns old. + */ +int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val); +unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val); +int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val); +unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val); + +#if defined(cl_khr_global_int32_base_atomics) +int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val); +unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val); +#endif +#if defined(cl_khr_local_int32_base_atomics) +int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val); +unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val); +#endif + +#if defined(cl_khr_int64_base_atomics) +long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val); +unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val); +long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val); +unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * min(old, val) and store minimum value at + * location pointed by p. The function + * returns old. 
+ */ +int __ovld atomic_min(volatile __global int *p, int val); +unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_min(volatile __local int *p, int val); +unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val); + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_min(volatile __global int *p, int val); +unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_min(volatile __local int *p, int val); +unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_min(volatile __global long *p, long val); +unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +long __ovld atom_min(volatile __local long *p, long val); +unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * max(old, val) and store maximum value at + * location pointed by p. The function + * returns old. + */ +int __ovld atomic_max(volatile __global int *p, int val); +unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_max(volatile __local int *p, int val); +unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val); + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_max(volatile __global int *p, int val); +unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_max(volatile __local int *p, int val); +unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_max(volatile __global long *p, long val); +unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_max(volatile __local long *p, long val); +unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old & val) and store result at location + * pointed by p. The function returns old. 
+ */ +int __ovld atomic_and(volatile __global int *p, int val); +unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_and(volatile __local int *p, int val); +unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val); + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_and(volatile __global int *p, int val); +unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_and(volatile __local int *p, int val); +unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_and(volatile __global long *p, long val); +unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_and(volatile __local long *p, long val); +unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old | val) and store result at location + * pointed by p. The function returns old. + */ +int __ovld atomic_or(volatile __global int *p, int val); +unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_or(volatile __local int *p, int val); +unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val); + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_or(volatile __global int *p, int val); +unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_or(volatile __local int *p, int val); +unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_or(volatile __global long *p, long val); +unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_or(volatile __local long *p, long val); +unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val); +#endif + +/** + * Read the 32-bit value (referred to as old) + * stored at location pointed by p. Compute + * (old ^ val) and store result at location + * pointed by p. The function returns old. 
+ */ +int __ovld atomic_xor(volatile __global int *p, int val); +unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val); +int __ovld atomic_xor(volatile __local int *p, int val); +unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val); + +#if defined(cl_khr_global_int32_extended_atomics) +int __ovld atom_xor(volatile __global int *p, int val); +unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val); +#endif +#if defined(cl_khr_local_int32_extended_atomics) +int __ovld atom_xor(volatile __local int *p, int val); +unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val); +#endif + +#if defined(cl_khr_int64_extended_atomics) +long __ovld atom_xor(volatile __global long *p, long val); +unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val); +long __ovld atom_xor(volatile __local long *p, long val); +unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val); +#endif + +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable +#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable +#endif + +// OpenCL v2.0 s6.13.11 - Atomics Functions + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +#ifndef ATOMIC_VAR_INIT +#define ATOMIC_VAR_INIT(x) (x) +#endif //ATOMIC_VAR_INIT +#define ATOMIC_FLAG_INIT 0 + +// enum values aligned with what clang uses in EmitAtomicExpr() +typedef enum memory_order +{ + memory_order_relaxed, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst +} memory_order; + +// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable +#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable +#endif + +// atomic_init() +void __ovld atomic_init(volatile atomic_int *object, int value); +void __ovld atomic_init(volatile atomic_uint *object, uint value); +void __ovld atomic_init(volatile atomic_float *object, float value); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +void __ovld atomic_init(volatile atomic_long *object, long value); +void __ovld atomic_init(volatile atomic_ulong *object, ulong value); +#ifdef cl_khr_fp64 +void __ovld atomic_init(volatile atomic_double *object, double value); +#endif //cl_khr_fp64 +#endif + +// atomic_work_item_fence() +void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope); + +// atomic_fetch() + +int __ovld atomic_fetch_add(volatile atomic_int *object, int operand); +int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand); +uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand); +int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order); +int __ovld 
atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand); +uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_or(volatile atomic_int *object, int operand); +int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand); +uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand); +int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand); +uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_and(volatile atomic_int *object, int operand); +int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand); +uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_min(volatile atomic_int *object, int operand); +int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand); +uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order); +uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_min(volatile atomic_uint *object, int operand); +uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order); +uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope); +int __ovld atomic_fetch_max(volatile atomic_int *object, int operand); +int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order); +int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand); +uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, 
memory_order order); +uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope); +uint __ovld atomic_fetch_max(volatile atomic_uint *object, int operand); +uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order); +uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, int operand, memory_order order, memory_scope scope); + +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +long __ovld atomic_fetch_add(volatile atomic_long *object, long operand); +long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand); +long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_or(volatile atomic_long *object, long operand); +long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand); +long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_and(volatile atomic_long *object, long operand); +long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong 
operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_min(volatile atomic_long *object, long operand); +long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, long operand); +ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order); +ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope); +long __ovld atomic_fetch_max(volatile atomic_long *object, long operand); +long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order); +long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand); +ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order); +ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope); +ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, long operand); +ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order); +ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, long operand, memory_order order, memory_scope scope); +#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) + +// OpenCL v2.0 s6.13.11.7.5: +// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t. +// or/xor/and/min/max: atomic type argument can be intptr_t/uintptr_t, value type argument can be intptr_t/uintptr_t. 
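As a quick orientation for the long list of overloads above, here is a minimal, hypothetical usage sketch; it is not part of the header being vendored. It shows an OpenCL 2.0 kernel combining the explicit-order atomic_fetch_min / atomic_fetch_add overloads declared above; the kernel name, argument names, and the choice of memory_order_relaxed with memory_scope_device are assumptions made purely for illustration.

__kernel void reduce_stats(__global const int *in, uint n,
                           __global atomic_int *min_out,         // host pre-initializes to INT_MAX
                           __global atomic_uint *nonzero_count)  // host pre-initializes to 0
{
    size_t gid = get_global_id(0);
    if (gid >= n)
        return;
    int v = in[gid];
    // Relaxed ordering suffices here: the host only reads the results after the
    // kernel has completed, so no additional intra-kernel ordering is needed.
    atomic_fetch_min_explicit(min_out, v, memory_order_relaxed, memory_scope_device);
    if (v != 0)
        atomic_fetch_add_explicit(nonzero_count, 1u, memory_order_relaxed, memory_scope_device);
}

A kernel like this has to be compiled as OpenCL 2.0 (e.g. with -cl-std=CL2.0), since the atomic_fetch_* overloads above are only declared when __OPENCL_C_VERSION__ >= CL_VERSION_2_0.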
+ +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand); +uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope); + +uintptr_t __ovld atomic_fetch_or(volatile atomic_uintptr_t *object, intptr_t operand); +uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_xor(volatile atomic_uintptr_t *object, intptr_t operand); +uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_and(volatile atomic_uintptr_t *object, intptr_t operand); +uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order); +uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope); +uintptr_t __ovld atomic_fetch_min(volatile atomic_uintptr_t *object, intptr_t opermax); +uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t opermax, memory_order minder); +uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t opermax, memory_order minder, memory_scope scope); +uintptr_t __ovld atomic_fetch_max(volatile atomic_uintptr_t *object, intptr_t opermax); +uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t opermax, memory_order minder); +uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t opermax, memory_order minder, memory_scope scope); + +intptr_t __ovld atomic_fetch_or(volatile atomic_intptr_t *object, uintptr_t operand); +intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_xor(volatile atomic_intptr_t *object, uintptr_t operand); +intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld atomic_fetch_and(volatile atomic_intptr_t *object, uintptr_t operand); +intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order); +intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope); +intptr_t __ovld 
atomic_fetch_min(volatile atomic_intptr_t *object, uintptr_t opermax); +intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t opermax, memory_order minder); +intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t opermax, memory_order minder, memory_scope scope); +intptr_t __ovld atomic_fetch_max(volatile atomic_intptr_t *object, uintptr_t opermax); +intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t opermax, memory_order minder); +intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t opermax, memory_order minder, memory_scope scope); +#endif + +// atomic_store() + +void __ovld atomic_store(volatile atomic_int *object, int desired); +void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope); +void __ovld atomic_store(volatile atomic_uint *object, uint desired); +void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope); +void __ovld atomic_store(volatile atomic_float *object, float desired); +void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +void __ovld atomic_store(volatile atomic_double *object, double desired); +void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope); +#endif //cl_khr_fp64 +void __ovld atomic_store(volatile atomic_long *object, long desired); +void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope); +void __ovld atomic_store(volatile atomic_ulong *object, ulong desired); +void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order); +void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope); +#endif + +// atomic_load() + +int __ovld atomic_load(volatile atomic_int *object); +int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order); +int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope); +uint __ovld atomic_load(volatile atomic_uint *object); +uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order); +uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope); +float __ovld atomic_load(volatile atomic_float *object); +float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order); +float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_load(volatile atomic_double *object); +double __ovld 
atomic_load_explicit(volatile atomic_double *object, memory_order order); +double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope); +#endif //cl_khr_fp64 +long __ovld atomic_load(volatile atomic_long *object); +long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order); +long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope); +ulong __ovld atomic_load(volatile atomic_ulong *object); +ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order); +ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope); +#endif + +// atomic_exchange() + +int __ovld atomic_exchange(volatile atomic_int *object, int desired); +int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order); +int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope); +uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired); +uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order); +uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope); +float __ovld atomic_exchange(volatile atomic_float *object, float desired); +float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order); +float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +double __ovld atomic_exchange(volatile atomic_double *object, double desired); +double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order); +double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope); +#endif //cl_khr_fp64 +long __ovld atomic_exchange(volatile atomic_long *object, long desired); +long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order); +long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope); +ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired); +ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order); +ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope); +#endif + +// atomic_compare_exchange_strong() and atomic_compare_exchange_weak() + +bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint 
*object, uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected, + int desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected, + int desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected, + uint desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected, + uint desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected, + float desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected, + float desired, memory_order success, memory_order failure, memory_scope scope); +#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) +#ifdef cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected, + double desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected, + double desired, memory_order success, memory_order failure, memory_scope scope); +#endif //cl_khr_fp64 +bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long 
*expected, + long desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected, + long desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected, + ulong desired, memory_order success, memory_order failure); +bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected, + ulong desired, memory_order success, memory_order failure, memory_scope scope); +#endif + +// atomic_flag_test_and_set() and atomic_flag_clear() + +bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object); +bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order); +bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope); +void __ovld atomic_flag_clear(volatile atomic_flag *object); +void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order); +void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope); + +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions + +/** + * The shuffle and shuffle2 built-in functions construct + * a permutation of elements from one or two input + * vectors respectively that are of the same type, + * returning a vector with the same element type as the + * input and length that is the same as the shuffle mask. + * The size of each element in the mask must match the + * size of each element in the result. For shuffle, only + * the ilogb(2m-1) least significant bits of each mask + * element are considered. For shuffle2, only the + * ilogb(2m-1)+1 least significant bits of each mask + * element are considered. Other bits in the mask shall + * be ignored. + * The elements of the input vectors are numbered from + * left to right across one or both of the vectors. For this + * purpose, the number of elements in a vector is given + * by vec_step(gentypem). The shuffle mask operand + * specifies, for each element of the result vector, which + * element of the one or two input vectors the result + * element gets. 
+ * Examples: + * uint4 mask = (uint4)(3, 2, + * 1, 0); + * float4 a; + * float4 r = shuffle(a, mask); + * // r.s0123 = a.wzyx + * uint8 mask = (uint8)(0, 1, 2, 3, + * 4, 5, 6, 7); + * float4 a, b; + * float8 r = shuffle2(a, b, mask); + * // r.s0123 = a.xyzw + * // r.s4567 = b.xyzw + * uint4 mask; + * float8 a; + * float4 b; + * b = shuffle(a, mask); + * Examples that are not valid are: + * uint8 mask; + * short16 a; + * short8 b; + * b = shuffle(a, mask); <- not valid + */ +char2 __ovld __cnfn shuffle(char2 x, uchar2 mask); +char2 __ovld __cnfn shuffle(char4 x, uchar2 mask); +char2 __ovld __cnfn shuffle(char8 x, uchar2 mask); +char2 __ovld __cnfn shuffle(char16 x, uchar2 mask); + +uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask); +uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask); +uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask); +uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask); + +short2 __ovld __cnfn shuffle(short2 x, ushort2 mask); +short2 __ovld __cnfn shuffle(short4 x, ushort2 mask); +short2 __ovld __cnfn shuffle(short8 x, ushort2 mask); +short2 __ovld __cnfn shuffle(short16 x, ushort2 mask); + +ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask); +ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask); +ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask); +ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask); + +int2 __ovld __cnfn shuffle(int2 x, uint2 mask); +int2 __ovld __cnfn shuffle(int4 x, uint2 mask); +int2 __ovld __cnfn shuffle(int8 x, uint2 mask); +int2 __ovld __cnfn shuffle(int16 x, uint2 mask); + +uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask); +uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask); +uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask); +uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask); + +long2 __ovld __cnfn shuffle(long2 x, ulong2 mask); +long2 __ovld __cnfn shuffle(long4 x, ulong2 mask); +long2 __ovld __cnfn shuffle(long8 x, ulong2 mask); +long2 __ovld __cnfn shuffle(long16 x, ulong2 mask); + +ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask); +ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask); +ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask); +ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask); + +float2 __ovld __cnfn shuffle(float2 x, uint2 mask); +float2 __ovld __cnfn shuffle(float4 x, uint2 mask); +float2 __ovld __cnfn shuffle(float8 x, uint2 mask); +float2 __ovld __cnfn shuffle(float16 x, uint2 mask); + +char4 __ovld __cnfn shuffle(char2 x, uchar4 mask); +char4 __ovld __cnfn shuffle(char4 x, uchar4 mask); +char4 __ovld __cnfn shuffle(char8 x, uchar4 mask); +char4 __ovld __cnfn shuffle(char16 x, uchar4 mask); + +uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask); +uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask); +uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask); +uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask); + +short4 __ovld __cnfn shuffle(short2 x, ushort4 mask); +short4 __ovld __cnfn shuffle(short4 x, ushort4 mask); +short4 __ovld __cnfn shuffle(short8 x, ushort4 mask); +short4 __ovld __cnfn shuffle(short16 x, ushort4 mask); + +ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask); +ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask); +ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask); +ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask); + +int4 __ovld __cnfn shuffle(int2 x, uint4 mask); +int4 __ovld __cnfn shuffle(int4 x, uint4 mask); +int4 __ovld __cnfn shuffle(int8 x, uint4 mask); +int4 __ovld __cnfn shuffle(int16 x, uint4 mask); + +uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask); +uint4 
__ovld __cnfn shuffle(uint4 x, uint4 mask); +uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask); +uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask); + +long4 __ovld __cnfn shuffle(long2 x, ulong4 mask); +long4 __ovld __cnfn shuffle(long4 x, ulong4 mask); +long4 __ovld __cnfn shuffle(long8 x, ulong4 mask); +long4 __ovld __cnfn shuffle(long16 x, ulong4 mask); + +ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask); +ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask); +ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask); +ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask); + +float4 __ovld __cnfn shuffle(float2 x, uint4 mask); +float4 __ovld __cnfn shuffle(float4 x, uint4 mask); +float4 __ovld __cnfn shuffle(float8 x, uint4 mask); +float4 __ovld __cnfn shuffle(float16 x, uint4 mask); + +char8 __ovld __cnfn shuffle(char2 x, uchar8 mask); +char8 __ovld __cnfn shuffle(char4 x, uchar8 mask); +char8 __ovld __cnfn shuffle(char8 x, uchar8 mask); +char8 __ovld __cnfn shuffle(char16 x, uchar8 mask); + +uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask); +uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask); +uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask); +uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask); + +short8 __ovld __cnfn shuffle(short2 x, ushort8 mask); +short8 __ovld __cnfn shuffle(short4 x, ushort8 mask); +short8 __ovld __cnfn shuffle(short8 x, ushort8 mask); +short8 __ovld __cnfn shuffle(short16 x, ushort8 mask); + +ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask); +ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask); +ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask); +ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask); + +int8 __ovld __cnfn shuffle(int2 x, uint8 mask); +int8 __ovld __cnfn shuffle(int4 x, uint8 mask); +int8 __ovld __cnfn shuffle(int8 x, uint8 mask); +int8 __ovld __cnfn shuffle(int16 x, uint8 mask); + +uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask); +uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask); +uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask); +uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask); + +long8 __ovld __cnfn shuffle(long2 x, ulong8 mask); +long8 __ovld __cnfn shuffle(long4 x, ulong8 mask); +long8 __ovld __cnfn shuffle(long8 x, ulong8 mask); +long8 __ovld __cnfn shuffle(long16 x, ulong8 mask); + +ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask); +ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask); +ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask); +ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask); + +float8 __ovld __cnfn shuffle(float2 x, uint8 mask); +float8 __ovld __cnfn shuffle(float4 x, uint8 mask); +float8 __ovld __cnfn shuffle(float8 x, uint8 mask); +float8 __ovld __cnfn shuffle(float16 x, uint8 mask); + +char16 __ovld __cnfn shuffle(char2 x, uchar16 mask); +char16 __ovld __cnfn shuffle(char4 x, uchar16 mask); +char16 __ovld __cnfn shuffle(char8 x, uchar16 mask); +char16 __ovld __cnfn shuffle(char16 x, uchar16 mask); + +uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask); +uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask); +uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask); +uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask); + +short16 __ovld __cnfn shuffle(short2 x, ushort16 mask); +short16 __ovld __cnfn shuffle(short4 x, ushort16 mask); +short16 __ovld __cnfn shuffle(short8 x, ushort16 mask); +short16 __ovld __cnfn shuffle(short16 x, ushort16 mask); + +ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask); +ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask); +ushort16 __ovld __cnfn 
shuffle(ushort8 x, ushort16 mask); +ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask); + +int16 __ovld __cnfn shuffle(int2 x, uint16 mask); +int16 __ovld __cnfn shuffle(int4 x, uint16 mask); +int16 __ovld __cnfn shuffle(int8 x, uint16 mask); +int16 __ovld __cnfn shuffle(int16 x, uint16 mask); + +uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask); +uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask); +uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask); +uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask); + +long16 __ovld __cnfn shuffle(long2 x, ulong16 mask); +long16 __ovld __cnfn shuffle(long4 x, ulong16 mask); +long16 __ovld __cnfn shuffle(long8 x, ulong16 mask); +long16 __ovld __cnfn shuffle(long16 x, ulong16 mask); + +ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask); +ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask); +ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask); +ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask); + +float16 __ovld __cnfn shuffle(float2 x, uint16 mask); +float16 __ovld __cnfn shuffle(float4 x, uint16 mask); +float16 __ovld __cnfn shuffle(float8 x, uint16 mask); +float16 __ovld __cnfn shuffle(float16 x, uint16 mask); + +#ifdef cl_khr_fp64 +double2 __ovld __cnfn shuffle(double2 x, ulong2 mask); +double2 __ovld __cnfn shuffle(double4 x, ulong2 mask); +double2 __ovld __cnfn shuffle(double8 x, ulong2 mask); +double2 __ovld __cnfn shuffle(double16 x, ulong2 mask); + +double4 __ovld __cnfn shuffle(double2 x, ulong4 mask); +double4 __ovld __cnfn shuffle(double4 x, ulong4 mask); +double4 __ovld __cnfn shuffle(double8 x, ulong4 mask); +double4 __ovld __cnfn shuffle(double16 x, ulong4 mask); + +double8 __ovld __cnfn shuffle(double2 x, ulong8 mask); +double8 __ovld __cnfn shuffle(double4 x, ulong8 mask); +double8 __ovld __cnfn shuffle(double8 x, ulong8 mask); +double8 __ovld __cnfn shuffle(double16 x, ulong8 mask); + +double16 __ovld __cnfn shuffle(double2 x, ulong16 mask); +double16 __ovld __cnfn shuffle(double4 x, ulong16 mask); +double16 __ovld __cnfn shuffle(double8 x, ulong16 mask); +double16 __ovld __cnfn shuffle(double16 x, ulong16 mask); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half2 __ovld __cnfn shuffle(half2 x, ushort2 mask); +half2 __ovld __cnfn shuffle(half4 x, ushort2 mask); +half2 __ovld __cnfn shuffle(half8 x, ushort2 mask); +half2 __ovld __cnfn shuffle(half16 x, ushort2 mask); + +half4 __ovld __cnfn shuffle(half2 x, ushort4 mask); +half4 __ovld __cnfn shuffle(half4 x, ushort4 mask); +half4 __ovld __cnfn shuffle(half8 x, ushort4 mask); +half4 __ovld __cnfn shuffle(half16 x, ushort4 mask); + +half8 __ovld __cnfn shuffle(half2 x, ushort8 mask); +half8 __ovld __cnfn shuffle(half4 x, ushort8 mask); +half8 __ovld __cnfn shuffle(half8 x, ushort8 mask); +half8 __ovld __cnfn shuffle(half16 x, ushort8 mask); + +half16 __ovld __cnfn shuffle(half2 x, ushort16 mask); +half16 __ovld __cnfn shuffle(half4 x, ushort16 mask); +half16 __ovld __cnfn shuffle(half8 x, ushort16 mask); +half16 __ovld __cnfn shuffle(half16 x, ushort16 mask); +#endif //cl_khr_fp16 + +char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask); +char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask); +char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask); +char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask); + +uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask); +uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask); +uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask); +uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 
y, uchar2 mask); + +short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask); +short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask); +short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask); +short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask); + +ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask); +ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask); +ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask); +ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask); + +int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask); +int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask); +int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask); +int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask); + +uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask); +uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask); +uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask); +uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask); + +long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask); +long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask); +long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask); +long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask); + +ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask); +ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask); +ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask); +ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask); + +float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask); +float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask); +float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask); +float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask); + +char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask); +char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask); +char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask); +char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask); + +uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask); +uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask); +uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask); +uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask); + +short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask); +short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask); +short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask); +short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask); + +ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask); +ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask); +ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask); +ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask); + +int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask); +int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask); +int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask); +int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask); + +uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask); +uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask); +uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask); +uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask); + +long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask); +long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask); +long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 
mask); +long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask); + +ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask); +ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask); +ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask); +ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask); + +float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask); +float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask); +float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask); +float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask); + +char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask); +char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask); +char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask); +char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask); + +uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask); +uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask); +uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask); +uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask); + +short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask); +short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask); +short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask); +short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask); + +ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask); +ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask); +ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask); +ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask); + +int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask); +int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask); +int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask); +int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask); + +uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask); +uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask); +uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask); +uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask); + +long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask); +long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask); +long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask); +long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask); + +ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask); +ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask); +ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask); +ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask); + +float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask); +float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask); +float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask); +float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask); + +char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask); +char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask); +char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask); +char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask); + +uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask); +uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask); +uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask); +uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask); + +short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask); +short16 __ovld __cnfn 
shuffle2(short4 x, short4 y, ushort16 mask); +short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask); +short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask); + +ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask); +ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask); +ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask); +ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask); + +int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask); +int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask); +int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask); +int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask); + +uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask); +uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask); +uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask); +uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask); + +long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask); +long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask); +long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask); +long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask); + +ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask); +ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask); +ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask); +ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask); + +float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask); +float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask); +float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask); +float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask); + +#ifdef cl_khr_fp64 +double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask); +double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask); +double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask); +double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask); + +double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask); +double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask); +double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask); +double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask); + +double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask); +double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask); +double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask); +double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask); + +double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask); +double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask); +double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask); +double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask); +half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask); +half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask); +half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask); + +half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask); +half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask); +half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask); +half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask); + +half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask); +half8 __ovld __cnfn 
shuffle2(half4 x, half4 y, ushort8 mask); +half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask); +half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask); + +half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask); +half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask); +half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask); +half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask); +#endif //cl_khr_fp16 + +#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2 +// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf + +int printf(__constant const char* st, ...); +#endif + +// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions + +// These values need to match the runtime equivalent +// +// Addressing Mode. +// +#define CLK_ADDRESS_NONE 0 +#define CLK_ADDRESS_CLAMP_TO_EDGE 2 +#define CLK_ADDRESS_CLAMP 4 +#define CLK_ADDRESS_REPEAT 6 +#define CLK_ADDRESS_MIRRORED_REPEAT 8 + +// +// Coordination Normalization +// +#define CLK_NORMALIZED_COORDS_FALSE 0 +#define CLK_NORMALIZED_COORDS_TRUE 1 + +// +// Filtering Mode. +// +#define CLK_FILTER_NEAREST 0x10 +#define CLK_FILTER_LINEAR 0x20 + +#ifdef cl_khr_gl_msaa_sharing +#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable +#endif //cl_khr_gl_msaa_sharing + +/** + * Use the coordinate (coord.xy) to do an element lookup in + * the 2D image object specified by image. + * + * Use the coordinate (coord.x, coord.y, coord.z) to do + * an element lookup in the 3D image object specified + * by image. coord.w is ignored. + * + * Use the coordinate (coord.z) to index into the + * 2D image array object specified by image_array + * and (coord.x, coord.y) to do an element lookup in + * the 2D image object specified by image. + * + * Use the coordinate (x) to do an element lookup in + * the 1D image object specified by image. + * + * Use the coordinate (coord.y) to index into the + * 1D image array object specified by image_array + * and (coord.x) to do an element lookup in + * the 1D image object specified by image. + * + * Use the coordinate (cood.xy) and sample to do an + * element lookup in the 2D multi-sample image specified + * by image. + * + * Use coord.xy and sample to do an element + * lookup in the 2D multi-sample image layer + * identified by index coord.z in the 2D multi-sample + * image array specified by image. + * + * For mipmap images, use the mip-level specified by + * the Level-of-Detail (lod) or use gradients for LOD + * computation. + * + * read_imagef returns floating-point values in the + * range [0.0 ... 1.0] for image objects created with + * image_channel_data_type set to one of the predefined + * packed formats or CL_UNORM_INT8, or + * CL_UNORM_INT16. + * + * read_imagef returns floating-point values in the + * range [-1.0 ... 1.0] for image objects created with + * image_channel_data_type set to CL_SNORM_INT8, + * or CL_SNORM_INT16. + * + * read_imagef returns floating-point values for image + * objects created with image_channel_data_type set to + * CL_HALF_FLOAT or CL_FLOAT. + * + * read_imagei and read_imageui return + * unnormalized signed integer and unsigned integer + * values respectively. Each channel will be stored in a + * 32-bit integer. + * + * read_imagei can only be used with image objects + * created with image_channel_data_type set to one of + * the following values: + * CL_SIGNED_INT8, + * CL_SIGNED_INT16 and + * CL_SIGNED_INT32. + * If the image_channel_data_type is not one of the + * above values, the values returned by read_imagei + * are undefined. 
+ * + * read_imageui can only be used with image objects + * created with image_channel_data_type set to one of + * the following values: + * CL_UNSIGNED_INT8, + * CL_UNSIGNED_INT16 and + * CL_UNSIGNED_INT32. + * If the image_channel_data_type is not one of the + * above values, the values returned by read_imageui + * are undefined. + * + * The read_image{i|ui} calls support a nearest filter + * only. The filter_mode specified in sampler + * must be set to CLK_FILTER_NEAREST; otherwise + * the values returned are undefined. + + * The read_image{f|i|ui} calls that take + * integer coordinates must use a sampler with + * normalized coordinates set to + * CLK_NORMALIZED_COORDS_FALSE and + * addressing mode set to + * CLK_ADDRESS_CLAMP_TO_EDGE, + * CLK_ADDRESS_CLAMP or CLK_ADDRESS_NONE; + * otherwise the values returned are undefined. + * + * Values returned by read_imagef for image objects + * with image_channel_data_type values not specified + * in the description above are undefined. + */ + +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord); +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord); + +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord); +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord); + +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord); +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord); + +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord); +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord); + +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord); +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord); + +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord); + +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord); +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord); + +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord); +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord); + +float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord); +float4 __purefn __ovld read_imagef(read_only image1d_array_t 
image_array, sampler_t sampler, float2 coord); + +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord); + +#ifdef cl_khr_depth_images +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord); +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord); + +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord); +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord); +#endif //cl_khr_depth_images + +#if defined(cl_khr_gl_msaa_sharing) +float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample); +int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample); +uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample); + +float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample); + +float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample); +int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample); +uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample); + +float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample); +#endif //cl_khr_gl_msaa_sharing + +// OpenCL Extension v2.0 s9.18 - Mipmaps +#ifdef cl_khr_mipmap_image + +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod); +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod); + +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); + +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t 
sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); + +float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); + +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); + +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); + +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); + +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); + +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); + +float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod); +int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, 
sampler_t sampler, float2 coord, float lod); + +float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); + +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod); + +#endif //cl_khr_mipmap_image + +/** +* Sampler-less Image Access +*/ + +float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord); +int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord); +uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord); + +float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord); +int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord); +uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord); + +float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord); +int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord); +uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord); + +float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord); +int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord); + +float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord); +int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord); +uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord); + +#ifdef cl_khr_depth_images +float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord); +float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord); +#endif //cl_khr_depth_images + +float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord); +int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord); +uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord); + +// Image read functions returning half4 type +#ifdef cl_khr_fp16 +half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord); +half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord); +half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord); +half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord); +half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord); +half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord); +half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord); +half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord); +half4 __purefn __ovld 
read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord); +half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord); +half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord); +half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord); +half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord); +half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord); +half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord); +half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord); +#endif //cl_khr_fp16 + +// Image read functions for read_write images +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord); +int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord); +uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord); + +float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord); +int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord); +uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord); + +float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord); +int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord); +uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord); + +float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord); +int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord); +uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord); + +float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord); +int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord); +uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord); + +float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord); +int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord); +uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord); + +#ifdef cl_khr_depth_images +float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord); +float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord); +#endif //cl_khr_depth_images + +#if cl_khr_gl_msaa_sharing +float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample); +int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample); +uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample); + +float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample); +int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample); +uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample); + +float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample); +float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample); +#endif //cl_khr_gl_msaa_sharing + +#ifdef cl_khr_mipmap_image +float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod); +int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod); +uint4 __purefn __ovld 
read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); + +float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); + +float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); +int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); +uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY); + +float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); +int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); +uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY); + +float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); +int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); +uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); + +float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY); + +float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); +int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); +uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); + +float __purefn __ovld 
read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY); + +float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); +int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); +uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY); + +float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod); +int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod); + +float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod); + +float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod); + +float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod); +int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod); +uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod); +#endif //cl_khr_mipmap_image + +// Image read functions returning half4 type +#ifdef cl_khr_fp16 +half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord); +half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord); +half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord); +half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord); +half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord); +half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Write color value to location specified by coordinate + * (coord.x, coord.y) in the 2D image object specified by image. + * (coord.x, coord.y) are considered to be unnormalized coordinates + * and must be in the range 0 ... image width - 1, and 0 + * ... image height - 1. 
+ + * Write color value to location specified by coordinate + * (coord.x, coord.y) in the 2D image object specified by index + * (coord.z) of the 2D image array object image_array. + * (coord.x, coord.y) are considered to be unnormalized + * coordinates and must be in the range 0 ... image width + * - 1. + * + * Write color value to location specified by coordinate + * (coord) in the 1D image (buffer) object specified by image. + * coord is considered to be unnormalized coordinates + * and must be in the range 0 ... image width - 1. + * + * Write color value to location specified by coordinate + * (coord.x) in the 1D image object specified by index + * (coord.y) of the 1D image array object image_array. + * x is considered to be unnormalized coordinates + * and must be in the range 0 ... image width - 1. + * + * Write color value to location specified by coordinate + * (coord.x, coord.y, coord.z) in the 3D image object specified by image. + * coord.x & coord.y are considered to be unnormalized coordinates + * and must be in the range 0 ... image width - 1, and 0 + * ... image height - 1. + * + * For mipmap images, use mip-level specified by lod. + * + * Appropriate data format conversion to the specified + * image format is done before writing the color value. + * + * write_imagef can only be used with image objects + * created with image_channel_data_type set to one of + * the pre-defined packed formats or set to + * CL_SNORM_INT8, CL_UNORM_INT8, + * CL_SNORM_INT16, CL_UNORM_INT16, + * CL_HALF_FLOAT or CL_FLOAT. Appropriate data + * format conversion will be done to convert channel + * data from a floating-point value to actual data format + * in which the channels are stored. + * + * write_imagei can only be used with image objects + * created with image_channel_data_type set to one of + * the following values: + * CL_SIGNED_INT8, + * CL_SIGNED_INT16 and + * CL_SIGNED_INT32. + * + * write_imageui can only be used with image objects + * created with image_channel_data_type set to one of + * the following values: + * CL_UNSIGNED_INT8, + * CL_UNSIGNED_INT16 and + * CL_UNSIGNED_INT32. + * + * The behavior of write_imagef, write_imagei and + * write_imageui for image objects created with + * image_channel_data_type values not specified in + * the description above or with (x, y) coordinate + * values that are not in the range (0 ... image width -1, + * 0 ... image height - 1), respectively, is undefined. 
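// Illustrative usage sketch (hypothetical kernel, not a declaration from this
// header): write_imagef with unnormalized integer coordinates inside the valid
// range described above; the float4 color is converted to the image's channel
// data type on store.

__kernel void fill_ramp(write_only image2d_t dst) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    float4 color = (float4)((float)pos.x / (float)get_image_width(dst),
                            0.0f, 0.0f, 1.0f);
    write_imagef(dst, pos, color);
}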
+ */ +void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color); +void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color); +void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color); + +void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color); +void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color); +void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color); + +void __ovld write_imagef(write_only image1d_t image, int coord, float4 color); +void __ovld write_imagei(write_only image1d_t image, int coord, int4 color); +void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color); + +void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color); +void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color); +void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color); + +void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color); +void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color); +void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color); + +void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color); +void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color); +void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color); + +#ifdef cl_khr_depth_images +void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color); +void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color); +#endif //cl_khr_depth_images + +// OpenCL Extension v2.0 s9.18 - Mipmaps +#ifdef cl_khr_mipmap_image +void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color); +void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color); +void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color); + +void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color); +void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color); +void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color); + +void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color); +void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color); +void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color); + +void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color); +void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color); +void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color); + +void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float color); +void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float color); + +void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color); +void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color); +void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color); +#endif //cl_khr_mipmap_image + +// Image write functions for half4 type +#ifdef 
cl_khr_fp16 +void __ovld write_imageh(write_only image1d_t image, int coord, half4 color); +void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color); +void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color); +void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color); +void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color); +void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color); +#endif //cl_khr_fp16 + +// Image write functions for read_write images +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color); +void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color); +void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color); + +void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color); +void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color); +void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color); + +void __ovld write_imagef(read_write image1d_t image, int coord, float4 color); +void __ovld write_imagei(read_write image1d_t image, int coord, int4 color); +void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color); + +void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color); +void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color); +void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color); + +void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color); +void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color); +void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color); + +void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color); +void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color); +void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color); + +#ifdef cl_khr_depth_images +void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color); +void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color); +#endif //cl_khr_depth_images + +#ifdef cl_khr_mipmap_image +void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color); +void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color); +void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color); + +void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color); +void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color); +void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color); + +void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color); +void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color); +void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color); + +void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color); +void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color); +void __ovld write_imageui(read_write image2d_array_t image_array, int4 
coord, int lod, uint4 color); + +void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color); +void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color); + +void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color); +void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color); +void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color); +#endif //cl_khr_mipmap_image + +// Image write functions for half4 type +#ifdef cl_khr_fp16 +void __ovld write_imageh(read_write image1d_t image, int coord, half4 color); +void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color); +void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color); +void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color); +void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color); +void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color); +#endif //cl_khr_fp16 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// Note: In OpenCL v1.0/1.1/1.2, image argument of image query builtin functions does not have +// access qualifier, which by default assume read_only access qualifier. Image query builtin +// functions with write_only image argument should also be declared. + +/** + * Return the image width in pixels. + * + */ +int __ovld __cnfn get_image_width(read_only image1d_t image); +int __ovld __cnfn get_image_width(read_only image1d_buffer_t image); +int __ovld __cnfn get_image_width(read_only image2d_t image); +int __ovld __cnfn get_image_width(read_only image3d_t image); +int __ovld __cnfn get_image_width(read_only image1d_array_t image); +int __ovld __cnfn get_image_width(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_width(read_only image2d_depth_t image); +int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_width(read_only image2d_msaa_t image); +int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int __ovld __cnfn get_image_width(write_only image1d_t image); +int __ovld __cnfn get_image_width(write_only image1d_buffer_t image); +int __ovld __cnfn get_image_width(write_only image2d_t image); +int __ovld __cnfn get_image_width(write_only image3d_t image); +int __ovld __cnfn get_image_width(write_only image1d_array_t image); +int __ovld __cnfn get_image_width(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_width(write_only image2d_depth_t image); +int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_width(write_only image2d_msaa_t image); +int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld __cnfn get_image_width(read_write image1d_t image); +int __ovld __cnfn 
get_image_width(read_write image1d_buffer_t image); +int __ovld __cnfn get_image_width(read_write image2d_t image); +int __ovld __cnfn get_image_width(read_write image3d_t image); +int __ovld __cnfn get_image_width(read_write image1d_array_t image); +int __ovld __cnfn get_image_width(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_width(read_write image2d_depth_t image); +int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_width(read_write image2d_msaa_t image); +int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image); +int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image); +int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Return the image height in pixels. + */ +int __ovld __cnfn get_image_height(read_only image2d_t image); +int __ovld __cnfn get_image_height(read_only image3d_t image); +int __ovld __cnfn get_image_height(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_height(read_only image2d_depth_t image); +int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_height(read_only image2d_msaa_t image); +int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int __ovld __cnfn get_image_height(write_only image2d_t image); +int __ovld __cnfn get_image_height(write_only image3d_t image); +int __ovld __cnfn get_image_height(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_height(write_only image2d_depth_t image); +int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_height(write_only image2d_msaa_t image); +int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld __cnfn get_image_height(read_write image2d_t image); +int __ovld __cnfn get_image_height(read_write image3d_t image); +int __ovld __cnfn get_image_height(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_height(read_write image2d_depth_t image); +int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_height(read_write image2d_msaa_t image); +int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image); +int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image); +int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Return the image depth in pixels. 
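// Illustrative usage sketch (hypothetical kernel): the width/height queries
// declared above are typically used to guard against work-items that fall
// outside the image when the global size has been rounded up.

__kernel void invert(read_only image2d_t src, write_only image2d_t dst) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    if (pos.x >= get_image_width(src) || pos.y >= get_image_height(src))
        return;
    float4 px = read_imagef(src, pos);        // sampler-less read (OpenCL C 1.2+)
    write_imagef(dst, pos, (float4)(1.0f) - px);
}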
+ */ +int __ovld __cnfn get_image_depth(read_only image3d_t image); + +int __ovld __cnfn get_image_depth(write_only image3d_t image); + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld __cnfn get_image_depth(read_write image3d_t image); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL Extension v2.0 s9.18 - Mipmaps +#ifdef cl_khr_mipmap_image +/** + * Return the image miplevels. + */ + +int __ovld get_image_num_mip_levels(read_only image1d_t image); +int __ovld get_image_num_mip_levels(read_only image2d_t image); +int __ovld get_image_num_mip_levels(read_only image3d_t image); + +int __ovld get_image_num_mip_levels(write_only image1d_t image); +int __ovld get_image_num_mip_levels(write_only image2d_t image); +int __ovld get_image_num_mip_levels(write_only image3d_t image); + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld get_image_num_mip_levels(read_write image1d_t image); +int __ovld get_image_num_mip_levels(read_write image2d_t image); +int __ovld get_image_num_mip_levels(read_write image3d_t image); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +int __ovld get_image_num_mip_levels(read_only image1d_array_t image); +int __ovld get_image_num_mip_levels(read_only image2d_array_t image); +int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image); +int __ovld get_image_num_mip_levels(read_only image2d_depth_t image); + +int __ovld get_image_num_mip_levels(write_only image1d_array_t image); +int __ovld get_image_num_mip_levels(write_only image2d_array_t image); +int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image); +int __ovld get_image_num_mip_levels(write_only image2d_depth_t image); + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld get_image_num_mip_levels(read_write image1d_array_t image); +int __ovld get_image_num_mip_levels(read_write image2d_array_t image); +int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image); +int __ovld get_image_num_mip_levels(read_write image2d_depth_t image); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +#endif //cl_khr_mipmap_image + +/** + * Return the channel data type. Valid values are: + * CLK_SNORM_INT8 + * CLK_SNORM_INT16 + * CLK_UNORM_INT8 + * CLK_UNORM_INT16 + * CLK_UNORM_SHORT_565 + * CLK_UNORM_SHORT_555 + * CLK_UNORM_SHORT_101010 + * CLK_SIGNED_INT8 + * CLK_SIGNED_INT16 + * CLK_SIGNED_INT32 + * CLK_UNSIGNED_INT8 + * CLK_UNSIGNED_INT16 + * CLK_UNSIGNED_INT32 + * CLK_HALF_FLOAT + * CLK_FLOAT + */ + +// +// Channel Datatype. 
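// Illustrative usage sketch (hypothetical kernel): the CLK_* channel data type
// codes defined below can be compared against get_image_channel_data_type() to
// pick the read_image* overload that matches the image's storage format.

__kernel void first_texel(read_only image2d_t img,
                          __global float4 *out_f, __global uint4 *out_ui) {
    if (get_global_id(0) != 0)
        return;
    int t = get_image_channel_data_type(img);
    if (t == CLK_UNSIGNED_INT8 || t == CLK_UNSIGNED_INT16 || t == CLK_UNSIGNED_INT32)
        *out_ui = read_imageui(img, (int2)(0, 0));
    else if (t == CLK_SIGNED_INT8 || t == CLK_SIGNED_INT16 || t == CLK_SIGNED_INT32)
        *out_f = convert_float4(read_imagei(img, (int2)(0, 0)));
    else
        *out_f = read_imagef(img, (int2)(0, 0));
}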
+// +#define CLK_SNORM_INT8 0x10D0 +#define CLK_SNORM_INT16 0x10D1 +#define CLK_UNORM_INT8 0x10D2 +#define CLK_UNORM_INT16 0x10D3 +#define CLK_UNORM_SHORT_565 0x10D4 +#define CLK_UNORM_SHORT_555 0x10D5 +#define CLK_UNORM_INT_101010 0x10D6 +#define CLK_SIGNED_INT8 0x10D7 +#define CLK_SIGNED_INT16 0x10D8 +#define CLK_SIGNED_INT32 0x10D9 +#define CLK_UNSIGNED_INT8 0x10DA +#define CLK_UNSIGNED_INT16 0x10DB +#define CLK_UNSIGNED_INT32 0x10DC +#define CLK_HALF_FLOAT 0x10DD +#define CLK_FLOAT 0x10DE +#define CLK_UNORM_INT24 0x10DF + +int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int 
__ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Return the image channel order. Valid values are: + * CLK_A + * CLK_R + * CLK_Rx + * CLK_RG + * CLK_RGx + * CLK_RA + * CLK_RGB + * CLK_RGBx + * CLK_RGBA + * CLK_ARGB + * CLK_BGRA + * CLK_INTENSITY + * CLK_LUMINANCE + */ +// Channel order, numbering must be aligned with cl_channel_order in cl.h +// +#define CLK_R 0x10B0 +#define CLK_A 0x10B1 +#define CLK_RG 0x10B2 +#define CLK_RA 0x10B3 +#define CLK_RGB 0x10B4 +#define CLK_RGBA 0x10B5 +#define CLK_BGRA 0x10B6 +#define CLK_ARGB 0x10B7 +#define CLK_INTENSITY 0x10B8 +#define CLK_LUMINANCE 0x10B9 +#define CLK_Rx 0x10BA +#define CLK_RGx 0x10BB +#define CLK_RGBx 0x10BC +#define CLK_DEPTH 0x10BD +#define CLK_DEPTH_STENCIL 0x10BE +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +#define CLK_sRGB 0x10BF +#define CLK_sRGBA 0x10C1 +#define CLK_sRGBx 0x10C0 +#define CLK_sBGRA 0x10C2 +#define CLK_ABGR 0x10C3 +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +int __ovld __cnfn get_image_channel_order(read_only image1d_t image); +int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_t image); +int __ovld __cnfn get_image_channel_order(read_only image3d_t image); +int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int __ovld __cnfn get_image_channel_order(write_only image1d_t image); +int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_t image); +int __ovld __cnfn get_image_channel_order(write_only image3d_t image); +int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld __cnfn 
get_image_channel_order(read_write image1d_t image); +int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_t image); +int __ovld __cnfn get_image_channel_order(read_write image3d_t image); +int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image); +int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Return the 2D image width and height as an int2 + * type. The width is returned in the x component, and + * the height in the y component. + */ +int2 __ovld __cnfn get_image_dim(read_only image2d_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image); +#ifdef cl_khr_depth_images +int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image); +int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +int2 __ovld __cnfn get_image_dim(write_only image2d_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image); +#ifdef cl_khr_depth_images +int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image); +int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int2 __ovld __cnfn get_image_dim(read_write image2d_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image); +#ifdef cl_khr_depth_images +int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image); +int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image); +#endif //cl_khr_gl_msaa_sharing +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Return the 3D image width, height, and depth as an + * int4 type. 
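// Illustrative usage sketch (hypothetical kernel): get_image_dim packs the 2D
// size into a single int2, which keeps bounds checks compact compared to
// separate get_image_width/get_image_height calls; the 3D overload declared
// below returns an int4 whose w component is 0.

__kernel void clear_border(write_only image2d_t dst) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    int2 dim = get_image_dim(dst);            // (width, height)
    if (pos.x == 0 || pos.y == 0 || pos.x == dim.x - 1 || pos.y == dim.y - 1)
        write_imagef(dst, pos, (float4)(0.0f));
}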
The width is returned in the x + * component, height in the y component, depth in the z + * component and the w component is 0. + */ +int4 __ovld __cnfn get_image_dim(read_only image3d_t image); +int4 __ovld __cnfn get_image_dim(write_only image3d_t image); +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int4 __ovld __cnfn get_image_dim(read_write image3d_t image); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** + * Return the image array size. + */ + +size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array); +size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array); +#ifdef cl_khr_depth_images +size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array); +size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array); +#endif //cl_khr_gl_msaa_sharing + +size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array); +size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array); +#ifdef cl_khr_depth_images +size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array); +size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array); +#endif //cl_khr_gl_msaa_sharing + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array); +size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array); +#ifdef cl_khr_depth_images +size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array); +#endif //cl_khr_depth_images +#if defined(cl_khr_gl_msaa_sharing) +size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array); +size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array); +#endif //cl_khr_gl_msaa_sharing +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +/** +* Return the number of samples associated with image +*/ +#if defined(cl_khr_gl_msaa_sharing) +int __ovld get_image_num_samples(read_only image2d_msaa_t image); +int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image); +int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image); +int __ovld get_image_num_samples(read_only image2d_array_msaa_t image); +int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image); + +int __ovld get_image_num_samples(write_only image2d_msaa_t image); +int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image); +int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image); +int __ovld get_image_num_samples(write_only image2d_array_msaa_t image); +int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image); + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld get_image_num_samples(read_write image2d_msaa_t image); +int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image); +int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image); +int __ovld get_image_num_samples(read_write image2d_array_msaa_t image); +int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image); +#endif 
//__OPENCL_C_VERSION__ >= CL_VERSION_2_0 +#endif + +// OpenCL v2.0 s6.13.15 - Work-group Functions + +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +int __ovld __conv work_group_all(int predicate); +int __ovld __conv work_group_any(int predicate); + +#ifdef cl_khr_fp16 +half __ovld __conv work_group_broadcast(half a, size_t local_id); +half __ovld __conv work_group_broadcast(half a, size_t x, size_t y); +half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z); +#endif +int __ovld __conv work_group_broadcast(int a, size_t local_id); +int __ovld __conv work_group_broadcast(int a, size_t x, size_t y); +int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z); +uint __ovld __conv work_group_broadcast(uint a, size_t local_id); +uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y); +uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z); +long __ovld __conv work_group_broadcast(long a, size_t local_id); +long __ovld __conv work_group_broadcast(long a, size_t x, size_t y); +long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z); +ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id); +ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y); +ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z); +float __ovld __conv work_group_broadcast(float a, size_t local_id); +float __ovld __conv work_group_broadcast(float a, size_t x, size_t y); +float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z); +#ifdef cl_khr_fp64 +double __ovld __conv work_group_broadcast(double a, size_t local_id); +double __ovld __conv work_group_broadcast(double a, size_t x, size_t y); +double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z); +#endif //cl_khr_fp64 + +#ifdef cl_khr_fp16 +half __ovld __conv work_group_reduce_add(half x); +half __ovld __conv work_group_reduce_min(half x); +half __ovld __conv work_group_reduce_max(half x); +half __ovld __conv work_group_scan_exclusive_add(half x); +half __ovld __conv work_group_scan_exclusive_min(half x); +half __ovld __conv work_group_scan_exclusive_max(half x); +half __ovld __conv work_group_scan_inclusive_add(half x); +half __ovld __conv work_group_scan_inclusive_min(half x); +half __ovld __conv work_group_scan_inclusive_max(half x); +#endif +int __ovld __conv work_group_reduce_add(int x); +int __ovld __conv work_group_reduce_min(int x); +int __ovld __conv work_group_reduce_max(int x); +int __ovld __conv work_group_scan_exclusive_add(int x); +int __ovld __conv work_group_scan_exclusive_min(int x); +int __ovld __conv work_group_scan_exclusive_max(int x); +int __ovld __conv work_group_scan_inclusive_add(int x); +int __ovld __conv work_group_scan_inclusive_min(int x); +int __ovld __conv work_group_scan_inclusive_max(int x); +uint __ovld __conv work_group_reduce_add(uint x); +uint __ovld __conv work_group_reduce_min(uint x); +uint __ovld __conv work_group_reduce_max(uint x); +uint __ovld __conv work_group_scan_exclusive_add(uint x); +uint __ovld __conv work_group_scan_exclusive_min(uint x); +uint __ovld __conv work_group_scan_exclusive_max(uint x); +uint __ovld __conv work_group_scan_inclusive_add(uint x); +uint __ovld __conv work_group_scan_inclusive_min(uint x); +uint __ovld __conv work_group_scan_inclusive_max(uint x); +long __ovld __conv work_group_reduce_add(long x); +long __ovld __conv work_group_reduce_min(long x); +long __ovld __conv work_group_reduce_max(long x); +long __ovld 
__conv work_group_scan_exclusive_add(long x); +long __ovld __conv work_group_scan_exclusive_min(long x); +long __ovld __conv work_group_scan_exclusive_max(long x); +long __ovld __conv work_group_scan_inclusive_add(long x); +long __ovld __conv work_group_scan_inclusive_min(long x); +long __ovld __conv work_group_scan_inclusive_max(long x); +ulong __ovld __conv work_group_reduce_add(ulong x); +ulong __ovld __conv work_group_reduce_min(ulong x); +ulong __ovld __conv work_group_reduce_max(ulong x); +ulong __ovld __conv work_group_scan_exclusive_add(ulong x); +ulong __ovld __conv work_group_scan_exclusive_min(ulong x); +ulong __ovld __conv work_group_scan_exclusive_max(ulong x); +ulong __ovld __conv work_group_scan_inclusive_add(ulong x); +ulong __ovld __conv work_group_scan_inclusive_min(ulong x); +ulong __ovld __conv work_group_scan_inclusive_max(ulong x); +float __ovld __conv work_group_reduce_add(float x); +float __ovld __conv work_group_reduce_min(float x); +float __ovld __conv work_group_reduce_max(float x); +float __ovld __conv work_group_scan_exclusive_add(float x); +float __ovld __conv work_group_scan_exclusive_min(float x); +float __ovld __conv work_group_scan_exclusive_max(float x); +float __ovld __conv work_group_scan_inclusive_add(float x); +float __ovld __conv work_group_scan_inclusive_min(float x); +float __ovld __conv work_group_scan_inclusive_max(float x); +#ifdef cl_khr_fp64 +double __ovld __conv work_group_reduce_add(double x); +double __ovld __conv work_group_reduce_min(double x); +double __ovld __conv work_group_reduce_max(double x); +double __ovld __conv work_group_scan_exclusive_add(double x); +double __ovld __conv work_group_scan_exclusive_min(double x); +double __ovld __conv work_group_scan_exclusive_max(double x); +double __ovld __conv work_group_scan_inclusive_add(double x); +double __ovld __conv work_group_scan_inclusive_min(double x); +double __ovld __conv work_group_scan_inclusive_max(double x); +#endif //cl_khr_fp64 + +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL v2.0 s6.13.16 - Pipe Functions +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +#define PIPE_RESERVE_ID_VALID_BIT (1U << 30) +#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t)) +bool __ovld is_valid_reserve_id(reserve_id_t reserve_id); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + + +// OpenCL v2.0 s6.13.17 - Enqueue Kernels +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +#define CL_COMPLETE 0x0 +#define CL_RUNNING 0x1 +#define CL_SUBMITTED 0x2 +#define CL_QUEUED 0x3 + +#define CLK_SUCCESS 0 +#define CLK_ENQUEUE_FAILURE -101 +#define CLK_INVALID_QUEUE -102 +#define CLK_INVALID_NDRANGE -160 +#define CLK_INVALID_EVENT_WAIT_LIST -57 +#define CLK_DEVICE_QUEUE_FULL -161 +#define CLK_INVALID_ARG_SIZE -51 +#define CLK_EVENT_ALLOCATION_FAILURE -100 +#define CLK_OUT_OF_RESOURCES -5 + +#define CLK_NULL_QUEUE 0 +#define CLK_NULL_EVENT (__builtin_astype(((void*)(__SIZE_MAX__)), clk_event_t)) + +// execution model related definitions +#define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0 +#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1 +#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2 + +typedef int kernel_enqueue_flags_t; +typedef int clk_profiling_info; + +// Profiling info name (see capture_event_profiling_info) +#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1 + +#define MAX_WORK_DIM 3 + +// ToDo: Remove definition of ndrange_t in Clang as an opaque type and add back +// the following ndrange_t definition. 
+#if 0 +typedef struct { + unsigned int workDimension; + size_t globalWorkOffset[MAX_WORK_DIM]; + size_t globalWorkSize[MAX_WORK_DIM]; + size_t localWorkSize[MAX_WORK_DIM]; +} ndrange_t; +#endif + +ndrange_t __ovld ndrange_1D(size_t); +ndrange_t __ovld ndrange_1D(size_t, size_t); +ndrange_t __ovld ndrange_1D(size_t, size_t, size_t); + +ndrange_t __ovld ndrange_2D(const size_t[2]); +ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2]); +ndrange_t __ovld ndrange_2D(const size_t[2], const size_t[2], const size_t[2]); + +ndrange_t __ovld ndrange_3D(const size_t[3]); +ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3]); +ndrange_t __ovld ndrange_3D(const size_t[3], const size_t[3], const size_t[3]); + +int __ovld enqueue_marker(queue_t, uint, const __private clk_event_t*, __private clk_event_t*); + +void __ovld retain_event(clk_event_t); + +void __ovld release_event(clk_event_t); + +clk_event_t __ovld create_user_event(void); + +void __ovld set_user_event_status(clk_event_t e, int state); + +bool __ovld is_valid_event (clk_event_t event); + +void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value); + +queue_t __ovld get_default_queue(void); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +// OpenCL Extension v2.0 s9.17 - Sub-groups + +#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) +// Shared Sub Group Functions +uint __ovld get_sub_group_size(void); +uint __ovld get_max_sub_group_size(void); +uint __ovld get_num_sub_groups(void); +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +uint __ovld get_enqueued_num_sub_groups(void); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 +uint __ovld get_sub_group_id(void); +uint __ovld get_sub_group_local_id(void); + +void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags); +#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 +void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope); +#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 + +int __ovld __conv sub_group_all(int predicate); +int __ovld __conv sub_group_any(int predicate); + +int __ovld __conv sub_group_broadcast(int x, uint sub_group_local_id); +uint __ovld __conv sub_group_broadcast(uint x, uint sub_group_local_id); +long __ovld __conv sub_group_broadcast(long x, uint sub_group_local_id); +ulong __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id); +float __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id); + +int __ovld __conv sub_group_reduce_add(int x); +uint __ovld __conv sub_group_reduce_add(uint x); +long __ovld __conv sub_group_reduce_add(long x); +ulong __ovld __conv sub_group_reduce_add(ulong x); +float __ovld __conv sub_group_reduce_add(float x); +int __ovld __conv sub_group_reduce_min(int x); +uint __ovld __conv sub_group_reduce_min(uint x); +long __ovld __conv sub_group_reduce_min(long x); +ulong __ovld __conv sub_group_reduce_min(ulong x); +float __ovld __conv sub_group_reduce_min(float x); +int __ovld __conv sub_group_reduce_max(int x); +uint __ovld __conv sub_group_reduce_max(uint x); +long __ovld __conv sub_group_reduce_max(long x); +ulong __ovld __conv sub_group_reduce_max(ulong x); +float __ovld __conv sub_group_reduce_max(float x); + +int __ovld __conv sub_group_scan_exclusive_add(int x); +uint __ovld __conv sub_group_scan_exclusive_add(uint x); +long __ovld __conv sub_group_scan_exclusive_add(long x); +ulong __ovld __conv sub_group_scan_exclusive_add(ulong x); +float __ovld __conv sub_group_scan_exclusive_add(float x); +int __ovld __conv 
sub_group_scan_exclusive_min(int x); +uint __ovld __conv sub_group_scan_exclusive_min(uint x); +long __ovld __conv sub_group_scan_exclusive_min(long x); +ulong __ovld __conv sub_group_scan_exclusive_min(ulong x); +float __ovld __conv sub_group_scan_exclusive_min(float x); +int __ovld __conv sub_group_scan_exclusive_max(int x); +uint __ovld __conv sub_group_scan_exclusive_max(uint x); +long __ovld __conv sub_group_scan_exclusive_max(long x); +ulong __ovld __conv sub_group_scan_exclusive_max(ulong x); +float __ovld __conv sub_group_scan_exclusive_max(float x); + +int __ovld __conv sub_group_scan_inclusive_add(int x); +uint __ovld __conv sub_group_scan_inclusive_add(uint x); +long __ovld __conv sub_group_scan_inclusive_add(long x); +ulong __ovld __conv sub_group_scan_inclusive_add(ulong x); +float __ovld __conv sub_group_scan_inclusive_add(float x); +int __ovld __conv sub_group_scan_inclusive_min(int x); +uint __ovld __conv sub_group_scan_inclusive_min(uint x); +long __ovld __conv sub_group_scan_inclusive_min(long x); +ulong __ovld __conv sub_group_scan_inclusive_min(ulong x); +float __ovld __conv sub_group_scan_inclusive_min(float x); +int __ovld __conv sub_group_scan_inclusive_max(int x); +uint __ovld __conv sub_group_scan_inclusive_max(uint x); +long __ovld __conv sub_group_scan_inclusive_max(long x); +ulong __ovld __conv sub_group_scan_inclusive_max(ulong x); +float __ovld __conv sub_group_scan_inclusive_max(float x); + +#ifdef cl_khr_fp16 +half __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id); +half __ovld __conv sub_group_reduce_add(half x); +half __ovld __conv sub_group_reduce_min(half x); +half __ovld __conv sub_group_reduce_max(half x); +half __ovld __conv sub_group_scan_exclusive_add(half x); +half __ovld __conv sub_group_scan_exclusive_min(half x); +half __ovld __conv sub_group_scan_exclusive_max(half x); +half __ovld __conv sub_group_scan_inclusive_add(half x); +half __ovld __conv sub_group_scan_inclusive_min(half x); +half __ovld __conv sub_group_scan_inclusive_max(half x); +#endif //cl_khr_fp16 + +#ifdef cl_khr_fp64 +double __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id); +double __ovld __conv sub_group_reduce_add(double x); +double __ovld __conv sub_group_reduce_min(double x); +double __ovld __conv sub_group_reduce_max(double x); +double __ovld __conv sub_group_scan_exclusive_add(double x); +double __ovld __conv sub_group_scan_exclusive_min(double x); +double __ovld __conv sub_group_scan_exclusive_max(double x); +double __ovld __conv sub_group_scan_inclusive_add(double x); +double __ovld __conv sub_group_scan_inclusive_min(double x); +double __ovld __conv sub_group_scan_inclusive_max(double x); +#endif //cl_khr_fp64 + +#endif //cl_khr_subgroups cl_intel_subgroups + +#ifdef cl_amd_media_ops +uint __ovld amd_bitalign(uint a, uint b, uint c); +uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c); +uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c); +uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c); +uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c); +uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c); + +uint __ovld amd_bytealign(uint a, uint b, uint c); +uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c); +uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c); +uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c); +uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c); +uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c); + +uint __ovld amd_lerp(uint a, uint b, uint c); +uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 
c); +uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c); +uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c); +uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c); +uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c); + +uint __ovld amd_pack(float4 v); + +uint __ovld amd_sad4(uint4 x, uint4 y, uint z); + +uint __ovld amd_sadhi(uint a, uint b, uint c); +uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c); +uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c); +uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c); +uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c); +uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c); + +uint __ovld amd_sad(uint a, uint b, uint c); +uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c); +uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c); +uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c); +uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c); +uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c); + +float __ovld amd_unpack0(uint a); +float2 __ovld amd_unpack0(uint2 a); +float3 __ovld amd_unpack0(uint3 a); +float4 __ovld amd_unpack0(uint4 a); +float8 __ovld amd_unpack0(uint8 a); +float16 __ovld amd_unpack0(uint16 a); + +float __ovld amd_unpack1(uint a); +float2 __ovld amd_unpack1(uint2 a); +float3 __ovld amd_unpack1(uint3 a); +float4 __ovld amd_unpack1(uint4 a); +float8 __ovld amd_unpack1(uint8 a); +float16 __ovld amd_unpack1(uint16 a); + +float __ovld amd_unpack2(uint a); +float2 __ovld amd_unpack2(uint2 a); +float3 __ovld amd_unpack2(uint3 a); +float4 __ovld amd_unpack2(uint4 a); +float8 __ovld amd_unpack2(uint8 a); +float16 __ovld amd_unpack2(uint16 a); + +float __ovld amd_unpack3(uint a); +float2 __ovld amd_unpack3(uint2 a); +float3 __ovld amd_unpack3(uint3 a); +float4 __ovld amd_unpack3(uint4 a); +float8 __ovld amd_unpack3(uint8 a); +float16 __ovld amd_unpack3(uint16 a); +#endif // cl_amd_media_ops + +#ifdef cl_amd_media_ops2 +int __ovld amd_bfe(int src0, uint src1, uint src2); +int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2); +int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2); +int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2); +int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2); +int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2); + +uint __ovld amd_bfe(uint src0, uint src1, uint src2); +uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2); +uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2); +uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2); +uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2); +uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2); + +uint __ovld amd_bfm(uint src0, uint src1); +uint2 __ovld amd_bfm(uint2 src0, uint2 src1); +uint3 __ovld amd_bfm(uint3 src0, uint3 src1); +uint4 __ovld amd_bfm(uint4 src0, uint4 src1); +uint8 __ovld amd_bfm(uint8 src0, uint8 src1); +uint16 __ovld amd_bfm(uint16 src0, uint16 src1); + +float __ovld amd_max3(float src0, float src1, float src2); +float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2); +float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2); +float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2); +float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2); +float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2); + +int __ovld amd_max3(int src0, int src1, int src2); +int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2); +int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2); +int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2); +int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2); +int16 __ovld amd_max3(int16 src0, int16 
src1, int16 src2); + +uint __ovld amd_max3(uint src0, uint src1, uint src2); +uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2); +uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2); +uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2); +uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2); +uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2); + +float __ovld amd_median3(float src0, float src1, float src2); +float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2); +float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2); +float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2); +float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2); +float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2); + +int __ovld amd_median3(int src0, int src1, int src2); +int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2); +int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2); +int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2); +int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2); +int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2); + +uint __ovld amd_median3(uint src0, uint src1, uint src2); +uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2); +uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2); +uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2); +uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2); +uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2); + +float __ovld amd_min3(float src0, float src1, float src); +float2 __ovld amd_min3(float2 src0, float2 src1, float2 src); +float3 __ovld amd_min3(float3 src0, float3 src1, float3 src); +float4 __ovld amd_min3(float4 src0, float4 src1, float4 src); +float8 __ovld amd_min3(float8 src0, float8 src1, float8 src); +float16 __ovld amd_min3(float16 src0, float16 src1, float16 src); + +int __ovld amd_min3(int src0, int src1, int src2); +int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2); +int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2); +int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2); +int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2); +int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2); + +uint __ovld amd_min3(uint src0, uint src1, uint src2); +uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2); +uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2); +uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2); +uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2); +uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2); + +ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2); +ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2); +ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2); +ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2); +ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2); +ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2); + +ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2); +ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2); +ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2); +ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2); +ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2); +ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2); + +uint __ovld amd_msad(uint src0, uint src1, uint src2); +uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2); +uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2); +uint4 
__ovld amd_msad(uint4 src0, uint4 src1, uint4 src2); +uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2); +uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2); + +uint __ovld amd_sadd(uint src0, uint src1, uint src2); +uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2); +uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2); +uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2); +uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2); +uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2); + +uint __ovld amd_sadw(uint src0, uint src1, uint src2); +uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2); +uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2); +uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2); +uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2); +uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2); +#endif // cl_amd_media_ops2 + +// Disable any extensions we may have enabled previously. +#pragma OPENCL EXTENSION all : disable + +#undef __cnfn +#undef __ovld +#endif //_OPENCL_H_ diff --git a/c_headers/pkuintrin.h b/c_headers/pkuintrin.h new file mode 100644 index 000000000..9e5459450 --- /dev/null +++ b/c_headers/pkuintrin.h @@ -0,0 +1,48 @@ +/*===------------- pkuintrin.h - PKU intrinsics ------------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __PKUINTRIN_H +#define __PKUINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku"))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_rdpkru_u32(void) +{ + return __builtin_ia32_rdpkru(); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_wrpkru(unsigned int __val) +{ + return __builtin_ia32_wrpkru(__val); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/pmmintrin.h b/c_headers/pmmintrin.h index e1b8d9b60..d4f6487af 100644 --- a/c_headers/pmmintrin.h +++ b/c_headers/pmmintrin.h @@ -20,79 +20,241 @@ * *===-----------------------------------------------------------------------=== */ - + #ifndef __PMMINTRIN_H #define __PMMINTRIN_H -#ifndef __SSE3__ -#error "SSE3 instruction set not enabled" -#else - #include /* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("sse3"))) +/// \brief Loads data from an unaligned memory location to elements in a 128-bit +/// vector. If the address of the data is not 16-byte aligned, the +/// instruction may read two adjacent aligned blocks of memory to retrieve +/// the requested data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDDQU instruction. +/// +/// \param __p +/// A pointer to a 128-bit integer vector containing integer values. +/// \returns A 128-bit vector containing the moved values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_lddqu_si128(__m128i const *__p) { return (__m128i)__builtin_ia32_lddqu((char const *)__p); } +/// \brief Adds the even-indexed values and subtracts the odd-indexed values of +/// two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the left source operand. +/// \param __b +/// A 128-bit vector of [4 x float] containing the right source operand. +/// \returns A 128-bit vector of [4 x float] containing the alternating sums and +/// differences of both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_addsub_ps(__m128 __a, __m128 __b) { - return __builtin_ia32_addsubps(__a, __b); + return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b); } +/// \brief Horizontally adds the adjacent pairs of values contained in two +/// 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [4 x float] containing the horizontal sums of +/// both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_hadd_ps(__m128 __a, __m128 __b) { - return __builtin_ia32_haddps(__a, __b); + return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in two +/// 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The horizontal differences between the values are stored in the lower +/// bits of the destination. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The horizontal differences between the values are stored in the upper +/// bits of the destination. +/// \returns A 128-bit vector of [4 x float] containing the horizontal +/// differences of both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_hsub_ps(__m128 __a, __m128 __b) { - return __builtin_ia32_hsubps(__a, __b); + return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b); } +/// \brief Moves and duplicates high-order (odd-indexed) values from a 128-bit +/// vector of [4 x float] to float values stored in a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSHDUP instruction. 
+/// +/// \param __a +/// A 128-bit vector of [4 x float]. \n +/// Bits [127:96] of the source are written to bits [127:96] and [95:64] of +/// the destination. \n +/// Bits [63:32] of the source are written to bits [63:32] and [31:0] of the +/// destination. +/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated +/// values. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_movehdup_ps(__m128 __a) { - return __builtin_shufflevector(__a, __a, 1, 1, 3, 3); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3); } +/// \brief Duplicates low-order (even-indexed) values from a 128-bit vector of +/// [4 x float] to float values stored in a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSLDUP instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float] \n +/// Bits [95:64] of the source are written to bits [127:96] and [95:64] of +/// the destination. \n +/// Bits [31:0] of the source are written to bits [63:32] and [31:0] of the +/// destination. +/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated +/// values. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_moveldup_ps(__m128 __a) { - return __builtin_shufflevector(__a, __a, 0, 0, 2, 2); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2); } +/// \brief Adds the even-indexed values and subtracts the odd-indexed values of +/// two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the left source operand. +/// \param __b +/// A 128-bit vector of [2 x double] containing the right source operand. +/// \returns A 128-bit vector of [2 x double] containing the alternating sums +/// and differences of both operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_addsub_pd(__m128d __a, __m128d __b) { - return __builtin_ia32_addsubpd(__a, __b); + return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b); } +/// \brief Horizontally adds the pairs of values contained in two 128-bit +/// vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// The horizontal sum of the values is stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// The horizontal sum of the values is stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [2 x double] containing the horizontal sums of +/// both operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_hadd_pd(__m128d __a, __m128d __b) { - return __builtin_ia32_haddpd(__a, __b); + return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b); } +/// \brief Horizontally subtracts the pairs of values contained in two 128-bit +/// vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// The horizontal difference of the values is stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// The horizontal difference of the values is stored in the upper bits of +/// the destination. 
+/// \returns A 128-bit vector of [2 x double] containing the horizontal +/// differences of both operands. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_hsub_pd(__m128d __a, __m128d __b) { - return __builtin_ia32_hsubpd(__a, __b); + return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b); } +/// \brief Moves and duplicates one double-precision value to double-precision +/// values stored in a 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_loaddup_pd(double const * dp); +/// \endcode +/// +/// This intrinsic corresponds to the VMOVDDUP instruction. +/// +/// \param dp +/// A pointer to a double-precision value to be moved and duplicated. +/// \returns A 128-bit vector of [2 x double] containing the moved and +/// duplicated values. #define _mm_loaddup_pd(dp) _mm_load1_pd(dp) +/// \brief Moves and duplicates the double-precision value in the lower bits of +/// a 128-bit vector of [2 x double] to double-precision values stored in a +/// 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. Bits [63:0] are written to bits +/// [127:64] and [63:0] of the destination. +/// \returns A 128-bit vector of [2 x double] containing the moved and +/// duplicated values. static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_movedup_pd(__m128d __a) { - return __builtin_shufflevector(__a, __a, 0, 0); + return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0); } #define _MM_DENORMALS_ZERO_ON (0x0040) @@ -103,12 +265,40 @@ _mm_movedup_pd(__m128d __a) #define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK) #define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x))) +/// \brief Establishes a linear address memory range to be monitored and puts +/// the processor in the monitor event pending state. Data stored in the +/// monitored address range causes the processor to exit the pending state. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MONITOR instruction. +/// +/// \param __p +/// The memory range to be monitored. The size of the range is determined by +/// CPUID function 0000_0005h. +/// \param __extensions +/// Optional extensions for the monitoring state. +/// \param __hints +/// Optional hints for the monitoring state. static __inline__ void __DEFAULT_FN_ATTRS _mm_monitor(void const *__p, unsigned __extensions, unsigned __hints) { __builtin_ia32_monitor((void *)__p, __extensions, __hints); } +/// \brief Used with the MONITOR instruction to wait while the processor is in +/// the monitor event pending state. Data stored in the monitored address +/// range causes the processor to exit the pending state. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MWAIT instruction. +/// +/// \param __extensions +/// Optional extensions for the monitoring state, which may vary by +/// processor. +/// \param __hints +/// Optional hints for the monitoring state, which may vary by processor. 
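(Illustrative aside, not part of the patch itself: a minimal host-side sketch of the SSE3 horizontal-add intrinsic documented in the pmmintrin.h hunk above. It assumes a build with SSE3 enabled, e.g. -msse3; the input values and the main() harness are hypothetical.)

/* _mm_hadd_ps pairs adjacent lanes of each operand:
 * result = {a0+a1, a2+a3, b0+b1, b2+b3}. */
#include <pmmintrin.h>
#include <stdio.h>

int main(void) {
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  /* lanes 0..3 = 1,2,3,4 */
    __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);  /* lanes 0..3 = 5,6,7,8 */

    __m128 sums = _mm_hadd_ps(a, b);                /* {3, 7, 11, 15} */

    float out[4];
    _mm_storeu_ps(out, sums);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}

Note that the hunk's only functional change to these intrinsics is the explicit (__v4sf)/(__v2df) casts and the per-function __target__("sse3") attribute, which is what allows the header to drop its old "#error SSE3 instruction set not enabled" guard.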
static __inline__ void __DEFAULT_FN_ATTRS _mm_mwait(unsigned __extensions, unsigned __hints) { @@ -117,6 +307,4 @@ _mm_mwait(unsigned __extensions, unsigned __hints) #undef __DEFAULT_FN_ATTRS -#endif /* __SSE3__ */ - #endif /* __PMMINTRIN_H */ diff --git a/c_headers/popcntintrin.h b/c_headers/popcntintrin.h index 1a4e9000a..0b4793e58 100644 --- a/c_headers/popcntintrin.h +++ b/c_headers/popcntintrin.h @@ -21,28 +21,76 @@ *===-----------------------------------------------------------------------=== */ -#ifndef __POPCNT__ -#error "POPCNT instruction set not enabled" -#endif - #ifndef _POPCNTINTRIN_H #define _POPCNTINTRIN_H /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt"))) +/// \brief Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POPCNT instruction. +/// +/// \param __A +/// An unsigned 32-bit integer operand. +/// \returns A 32-bit integer containing the number of bits with value 1 in the +/// source operand. static __inline__ int __DEFAULT_FN_ATTRS _mm_popcnt_u32(unsigned int __A) { return __builtin_popcount(__A); } +/// \brief Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POPCNT instruction. +/// +/// \param __A +/// A signed 32-bit integer operand. +/// \returns A 32-bit integer containing the number of bits with value 1 in the +/// source operand. +static __inline__ int __DEFAULT_FN_ATTRS +_popcnt32(int __A) +{ + return __builtin_popcount(__A); +} + #ifdef __x86_64__ +/// \brief Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POPCNT instruction. +/// +/// \param __A +/// An unsigned 64-bit integer operand. +/// \returns A 64-bit integer containing the number of bits with value 1 in the +/// source operand. static __inline__ long long __DEFAULT_FN_ATTRS _mm_popcnt_u64(unsigned long long __A) { return __builtin_popcountll(__A); } + +/// \brief Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POPCNT instruction. +/// +/// \param __A +/// A signed 64-bit integer operand. +/// \returns A 64-bit integer containing the number of bits with value 1 in the +/// source operand. +static __inline__ long long __DEFAULT_FN_ATTRS +_popcnt64(long long __A) +{ + return __builtin_popcountll(__A); +} #endif /* __x86_64__ */ #undef __DEFAULT_FN_ATTRS diff --git a/c_headers/prfchwintrin.h b/c_headers/prfchwintrin.h index 9825bd8c9..ba0285751 100644 --- a/c_headers/prfchwintrin.h +++ b/c_headers/prfchwintrin.h @@ -29,6 +29,12 @@ #define __PRFCHWINTRIN_H #if defined(__PRFCHW__) || defined(__3dNOW__) +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_m_prefetch(void *__P) +{ + __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */); +} + static __inline__ void __attribute__((__always_inline__, __nodebug__)) _m_prefetchw(void *__P) { diff --git a/c_headers/rdseedintrin.h b/c_headers/rdseedintrin.h index fdf7e18af..421f4ea48 100644 --- a/c_headers/rdseedintrin.h +++ b/c_headers/rdseedintrin.h @@ -28,10 +28,8 @@ #ifndef __RDSEEDINTRIN_H #define __RDSEEDINTRIN_H -#ifdef __RDSEED__ - /* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed"))) static __inline__ int __DEFAULT_FN_ATTRS _rdseed16_step(unsigned short *__p) @@ -55,5 +53,4 @@ _rdseed64_step(unsigned long long *__p) #undef __DEFAULT_FN_ATTRS -#endif /* __RDSEED__ */ #endif /* __RDSEEDINTRIN_H */ diff --git a/c_headers/rtmintrin.h b/c_headers/rtmintrin.h index 17256815f..e6a58d743 100644 --- a/c_headers/rtmintrin.h +++ b/c_headers/rtmintrin.h @@ -38,7 +38,7 @@ #define _XABORT_CODE(x) (((x) >> 24) & 0xFF) /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rtm"))) static __inline__ unsigned int __DEFAULT_FN_ATTRS _xbegin(void) diff --git a/c_headers/shaintrin.h b/c_headers/shaintrin.h index 960cced7a..9b5d21800 100644 --- a/c_headers/shaintrin.h +++ b/c_headers/shaintrin.h @@ -28,15 +28,11 @@ #ifndef __SHAINTRIN_H #define __SHAINTRIN_H -#if !defined (__SHA__) -# error "SHA instructions not enabled" -#endif - /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"))) #define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \ - __builtin_ia32_sha1rnds4((V1), (V2), (M)); }) + __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)); }) static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sha1nexte_epu32(__m128i __X, __m128i __Y) diff --git a/c_headers/smmintrin.h b/c_headers/smmintrin.h index 04bd0722b..e48ab034f 100644 --- a/c_headers/smmintrin.h +++ b/c_headers/smmintrin.h @@ -24,14 +24,10 @@ #ifndef _SMMINTRIN_H #define _SMMINTRIN_H -#ifndef __SSE4_1__ -#error "SSE4.1 instruction set not enabled" -#else - #include /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"))) /* SSE4 Rounding macros. */ #define _MM_FROUND_TO_NEAREST_INT 0x00 @@ -61,35 +57,28 @@ #define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR) #define _mm_round_ps(X, M) __extension__ ({ \ - __m128 __X = (X); \ - (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); }) + (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); }) #define _mm_round_ss(X, Y, M) __extension__ ({ \ - __m128 __X = (X); \ - __m128 __Y = (Y); \ - (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); }) + (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (M)); }) #define _mm_round_pd(X, M) __extension__ ({ \ - __m128d __X = (X); \ - (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); }) + (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); }) #define _mm_round_sd(X, Y, M) __extension__ ({ \ - __m128d __X = (X); \ - __m128d __Y = (Y); \ - (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); }) + (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (M)); }) /* SSE4 Packed Blending Intrinsics. 
*/ #define _mm_blend_pd(V1, V2, M) __extension__ ({ \ - __m128d __V1 = (V1); \ - __m128d __V2 = (V2); \ - (__m128d)__builtin_shufflevector((__v2df)__V1, (__v2df)__V2, \ + (__m128d)__builtin_shufflevector((__v2df)(__m128d)(V1), \ + (__v2df)(__m128d)(V2), \ (((M) & 0x01) ? 2 : 0), \ (((M) & 0x02) ? 3 : 1)); }) #define _mm_blend_ps(V1, V2, M) __extension__ ({ \ - __m128 __V1 = (V1); \ - __m128 __V2 = (V2); \ - (__m128)__builtin_shufflevector((__v4sf)__V1, (__v4sf)__V2, \ + (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \ (((M) & 0x01) ? 4 : 0), \ (((M) & 0x02) ? 5 : 1), \ (((M) & 0x04) ? 6 : 2), \ @@ -117,9 +106,8 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M) } #define _mm_blend_epi16(V1, V2, M) __extension__ ({ \ - __m128i __V1 = (V1); \ - __m128i __V2 = (V2); \ - (__m128i)__builtin_shufflevector((__v8hi)__V1, (__v8hi)__V2, \ + (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(V1), \ + (__v8hi)(__m128i)(V2), \ (((M) & 0x01) ? 8 : 0), \ (((M) & 0x02) ? 9 : 1), \ (((M) & 0x04) ? 10 : 2), \ @@ -133,7 +121,7 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M) static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32 (__m128i __V1, __m128i __V2) { - return (__m128i) ((__v4si)__V1 * (__v4si)__V2); + return (__m128i) ((__v4su)__V1 * (__v4su)__V2); } static __inline__ __m128i __DEFAULT_FN_ATTRS @@ -144,20 +132,18 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2) /* SSE4 Floating Point Dot Product Instructions. */ #define _mm_dp_ps(X, Y, M) __extension__ ({ \ - __m128 __X = (X); \ - __m128 __Y = (Y); \ - (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); }) + (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (M)); }) #define _mm_dp_pd(X, Y, M) __extension__ ({\ - __m128d __X = (X); \ - __m128d __Y = (Y); \ - (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); }) + (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (M)); }) /* SSE4 Streaming Load Hint Instruction. */ static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_stream_load_si128 (__m128i *__V) +_mm_stream_load_si128 (__m128i const *__V) { - return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V); + return (__m128i) __builtin_ia32_movntdqa ((const __v2di *) __V); } /* SSE4 Packed Integer Min/Max Instructions. */ @@ -213,7 +199,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2) #define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N)) #define _mm_extract_ps(X, N) (__extension__ \ ({ union { int __i; float __f; } __t; \ - __v4sf __a = (__v4sf)(X); \ + __v4sf __a = (__v4sf)(__m128)(X); \ __t.__f = __a[(N) & 3]; \ __t.__i;})) @@ -221,39 +207,44 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2) /* Extract a single-precision float from X at index N into D. */ #define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \ (D) = __a[N]; })) - + /* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create an index suitable for _mm_insert_ps. */ #define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z)) - + /* Extract a float from X at index N into the first index of the return. */ #define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \ _MM_MK_INSERTPS_NDX((N), 0, 0x0e)) - + /* Insert int into packed integer array at index. 
*/ -#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \ - __a[(N) & 15] = (I); \ - __a;})) -#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \ - __a[(N) & 3] = (I); \ - __a;})) +#define _mm_insert_epi8(X, I, N) (__extension__ \ + ({ __v16qi __a = (__v16qi)(__m128i)(X); \ + __a[(N) & 15] = (I); \ + (__m128i)__a;})) +#define _mm_insert_epi32(X, I, N) (__extension__ \ + ({ __v4si __a = (__v4si)(__m128i)(X); \ + __a[(N) & 3] = (I); \ + (__m128i)__a;})) #ifdef __x86_64__ -#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \ - __a[(N) & 1] = (I); \ - __a;})) +#define _mm_insert_epi64(X, I, N) (__extension__ \ + ({ __v2di __a = (__v2di)(__m128i)(X); \ + __a[(N) & 1] = (I); \ + (__m128i)__a;})) #endif /* __x86_64__ */ /* Extract int from packed integer array at index. This returns the element * as a zero extended value, so it is unsigned. */ -#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \ - (int)(unsigned char) \ - __a[(N) & 15];})) -#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \ - __a[(N) & 3];})) +#define _mm_extract_epi8(X, N) (__extension__ \ + ({ __v16qi __a = (__v16qi)(__m128i)(X); \ + (int)(unsigned char) __a[(N) & 15];})) +#define _mm_extract_epi32(X, N) (__extension__ \ + ({ __v4si __a = (__v4si)(__m128i)(X); \ + (int)__a[(N) & 3];})) #ifdef __x86_64__ -#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \ - __a[(N) & 1];})) +#define _mm_extract_epi64(X, N) (__extension__ \ + ({ __v2di __a = (__v2di)(__m128i)(X); \ + (long long)__a[(N) & 1];})) #endif /* __x86_64 */ /* SSE4 128-bit Packed Integer Comparisons. */ @@ -290,74 +281,80 @@ _mm_cmpeq_epi64(__m128i __V1, __m128i __V2) static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) { - return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V); + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) { - return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V); + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) { - return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V); + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. 
*/ + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) { - return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) { - return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) { - return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di); } /* SSE4 Packed Integer Zero-Extension. */ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) { - return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) { - return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) { - return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) { - return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) { - return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di); } static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) { - return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V); + return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di); } /* SSE4 Pack with Unsigned Saturation. */ @@ -369,9 +366,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2) /* SSE4 Multiple Packed Sums of Absolute Difference. */ #define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \ - __m128i __X = (X); \ - __m128i __Y = (Y); \ - (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); }) + (__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (M)); }) static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) @@ -379,9 +375,13 @@ _mm_minpos_epu16(__m128i __V) return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V); } +/* Handle the sse4.2 definitions here. */ + /* These definitions are normally in nmmintrin.h, but gcc puts them in here so we'll do the same. */ -#ifdef __SSE4_2__ + +#undef __DEFAULT_FN_ATTRS +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2"))) /* These specify the type of data that we're comparing. 
*/ #define _SIDD_UBYTE_OPS 0x00 @@ -410,36 +410,59 @@ _mm_minpos_epu16(__m128i __V) #define _SIDD_UNIT_MASK 0x40 /* SSE4.2 Packed Comparison Intrinsics. */ -#define _mm_cmpistrm(A, B, M) __builtin_ia32_pcmpistrm128((A), (B), (M)) -#define _mm_cmpistri(A, B, M) __builtin_ia32_pcmpistri128((A), (B), (M)) +#define _mm_cmpistrm(A, B, M) \ + (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) +#define _mm_cmpistri(A, B, M) \ + (int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) #define _mm_cmpestrm(A, LA, B, LB, M) \ - __builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M)) + (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) #define _mm_cmpestri(A, LA, B, LB, M) \ - __builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M)) - + (int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) + /* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */ #define _mm_cmpistra(A, B, M) \ - __builtin_ia32_pcmpistria128((A), (B), (M)) + (int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) #define _mm_cmpistrc(A, B, M) \ - __builtin_ia32_pcmpistric128((A), (B), (M)) + (int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) #define _mm_cmpistro(A, B, M) \ - __builtin_ia32_pcmpistrio128((A), (B), (M)) + (int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) #define _mm_cmpistrs(A, B, M) \ - __builtin_ia32_pcmpistris128((A), (B), (M)) + (int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) #define _mm_cmpistrz(A, B, M) \ - __builtin_ia32_pcmpistriz128((A), (B), (M)) + (int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M)) #define _mm_cmpestra(A, LA, B, LB, M) \ - __builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M)) + (int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) #define _mm_cmpestrc(A, LA, B, LB, M) \ - __builtin_ia32_pcmpestric128((A), (LA), (B), (LB), (M)) + (int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) #define _mm_cmpestro(A, LA, B, LB, M) \ - __builtin_ia32_pcmpestrio128((A), (LA), (B), (LB), (M)) + (int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) #define _mm_cmpestrs(A, LA, B, LB, M) \ - __builtin_ia32_pcmpestris128((A), (LA), (B), (LB), (M)) + (int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) #define _mm_cmpestrz(A, LA, B, LB, M) \ - __builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M)) + (int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M)) /* SSE4.2 Compare Packed Data -- Greater Than. 
*/ static __inline__ __m128i __DEFAULT_FN_ATTRS @@ -481,7 +504,4 @@ _mm_crc32_u64(unsigned long long __C, unsigned long long __D) #include #endif -#endif /* __SSE4_2__ */ -#endif /* __SSE4_1__ */ - #endif /* _SMMINTRIN_H */ diff --git a/c_headers/stdatomic.h b/c_headers/stdatomic.h index e03798766..23bb3a357 100644 --- a/c_headers/stdatomic.h +++ b/c_headers/stdatomic.h @@ -45,11 +45,11 @@ extern "C" { #define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE #define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE #define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE -#define ATOMIC_SHORT_T_LOCK_FREE __GCC_ATOMIC_SHORT_T_LOCK_FREE -#define ATOMIC_INT_T_LOCK_FREE __GCC_ATOMIC_INT_T_LOCK_FREE -#define ATOMIC_LONG_T_LOCK_FREE __GCC_ATOMIC_LONG_T_LOCK_FREE -#define ATOMIC_LLONG_T_LOCK_FREE __GCC_ATOMIC_LLONG_T_LOCK_FREE -#define ATOMIC_POINTER_T_LOCK_FREE __GCC_ATOMIC_POINTER_T_LOCK_FREE +#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE +#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE +#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE +#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE +#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE /* 7.17.2 Initialization */ diff --git a/c_headers/stdint.h b/c_headers/stdint.h index 0303db90b..3f2fcbc57 100644 --- a/c_headers/stdint.h +++ b/c_headers/stdint.h @@ -77,14 +77,14 @@ * C99 7.18.1.2 Minimum-width integer types. * C99 7.18.1.3 Fastest minimum-width integer types. * - * The standard requires that exact-width type be defined for 8-, 16-, 32-, and + * The standard requires that exact-width type be defined for 8-, 16-, 32-, and * 64-bit types if they are implemented. Other exact width types are optional. * This implementation defines an exact-width types for every integer width * that is represented in the standard integer types. * * The standard also requires minimum-width types be defined for 8-, 16-, 32-, * and 64-bit widths regardless of whether there are corresponding exact-width - * types. + * types. * * To accommodate targets that are missing types that are exactly 8, 16, 32, or * 64 bits wide, this implementation takes an approach of cascading @@ -97,7 +97,7 @@ * suboptimal. * * In violation of the standard, some targets do not implement a type that is - * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit). + * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit). * To accommodate these targets, a required minimum-width type is only * defined if there exists an exact-width type of equal or greater width. */ @@ -247,7 +247,7 @@ typedef __uint_least8_t uint_fast8_t; #endif /* __int_least8_t */ /* prevent glibc sys/types.h from defining conflicting types */ -#ifndef __int8_t_defined +#ifndef __int8_t_defined # define __int8_t_defined #endif /* __int8_t_defined */ @@ -280,9 +280,9 @@ typedef __UINTMAX_TYPE__ uintmax_t; * * The standard requires that integer constant macros be defined for all the * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width - * types are required, the corresponding integer constant macros are defined + * types are required, the corresponding integer constant macros are defined * here. This implementation also defines minimum-width types for every other - * integer width that the target implements, so corresponding macros are + * integer width that the target implements, so corresponding macros are * defined below, too. 
* * These macros are defined using the same successive-shrinking approach as @@ -452,7 +452,7 @@ typedef __UINTMAX_TYPE__ uintmax_t; #endif /* __int_least8_t */ -/* C99 7.18.2.1 Limits of exact-width integer types. +/* C99 7.18.2.1 Limits of exact-width integer types. * C99 7.18.2.2 Limits of minimum-width integer types. * C99 7.18.2.3 Limits of fastest minimum-width integer types. * diff --git a/c_headers/tbmintrin.h b/c_headers/tbmintrin.h index 48c0b07f4..1d0d746a8 100644 --- a/c_headers/tbmintrin.h +++ b/c_headers/tbmintrin.h @@ -21,10 +21,6 @@ *===-----------------------------------------------------------------------=== */ -#ifndef __TBM__ -#error "TBM instruction set is not enabled" -#endif - #ifndef __X86INTRIN_H #error "Never use directly; include instead." #endif @@ -33,119 +29,123 @@ #define __TBMINTRIN_H /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("tbm"))) -#define __bextri_u32(a, b) (__builtin_ia32_bextri_u32((a), (b))) +#define __bextri_u32(a, b) \ + ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(a), \ + (unsigned int)(b))) static __inline__ unsigned int __DEFAULT_FN_ATTRS -__blcfill_u32(unsigned int a) +__blcfill_u32(unsigned int __a) { - return a & (a + 1); + return __a & (__a + 1); } static __inline__ unsigned int __DEFAULT_FN_ATTRS -__blci_u32(unsigned int a) +__blci_u32(unsigned int __a) { - return a | ~(a + 1); + return __a | ~(__a + 1); } static __inline__ unsigned int __DEFAULT_FN_ATTRS -__blcic_u32(unsigned int a) +__blcic_u32(unsigned int __a) { - return ~a & (a + 1); + return ~__a & (__a + 1); } static __inline__ unsigned int __DEFAULT_FN_ATTRS -__blcmsk_u32(unsigned int a) +__blcmsk_u32(unsigned int __a) { - return a ^ (a + 1); + return __a ^ (__a + 1); } static __inline__ unsigned int __DEFAULT_FN_ATTRS -__blcs_u32(unsigned int a) +__blcs_u32(unsigned int __a) { - return a | (a + 1); + return __a | (__a + 1); } static __inline__ unsigned int __DEFAULT_FN_ATTRS -__blsfill_u32(unsigned int a) +__blsfill_u32(unsigned int __a) { - return a | (a - 1); + return __a | (__a - 1); } static __inline__ unsigned int __DEFAULT_FN_ATTRS -__blsic_u32(unsigned int a) +__blsic_u32(unsigned int __a) { - return ~a | (a - 1); + return ~__a | (__a - 1); } static __inline__ unsigned int __DEFAULT_FN_ATTRS -__t1mskc_u32(unsigned int a) +__t1mskc_u32(unsigned int __a) { - return ~a | (a + 1); + return ~__a | (__a + 1); } static __inline__ unsigned int __DEFAULT_FN_ATTRS -__tzmsk_u32(unsigned int a) +__tzmsk_u32(unsigned int __a) { - return ~a & (a - 1); + return ~__a & (__a - 1); } #ifdef __x86_64__ -#define __bextri_u64(a, b) (__builtin_ia32_bextri_u64((a), (int)(b))) +#define __bextri_u64(a, b) \ + ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(a), \ + (unsigned long long)(b))) static __inline__ unsigned long long __DEFAULT_FN_ATTRS -__blcfill_u64(unsigned long long a) +__blcfill_u64(unsigned long long __a) { - return a & (a + 1); + return __a & (__a + 1); } static __inline__ unsigned long long __DEFAULT_FN_ATTRS -__blci_u64(unsigned long long a) +__blci_u64(unsigned long long __a) { - return a | ~(a + 1); + return __a | ~(__a + 1); } static __inline__ unsigned long long __DEFAULT_FN_ATTRS -__blcic_u64(unsigned long long a) +__blcic_u64(unsigned long long __a) { - return ~a & (a + 1); + return ~__a & (__a + 1); } static __inline__ unsigned long long __DEFAULT_FN_ATTRS 
-__blcmsk_u64(unsigned long long a) +__blcmsk_u64(unsigned long long __a) { - return a ^ (a + 1); + return __a ^ (__a + 1); } static __inline__ unsigned long long __DEFAULT_FN_ATTRS -__blcs_u64(unsigned long long a) +__blcs_u64(unsigned long long __a) { - return a | (a + 1); + return __a | (__a + 1); } static __inline__ unsigned long long __DEFAULT_FN_ATTRS -__blsfill_u64(unsigned long long a) +__blsfill_u64(unsigned long long __a) { - return a | (a - 1); + return __a | (__a - 1); } static __inline__ unsigned long long __DEFAULT_FN_ATTRS -__blsic_u64(unsigned long long a) +__blsic_u64(unsigned long long __a) { - return ~a | (a - 1); + return ~__a | (__a - 1); } static __inline__ unsigned long long __DEFAULT_FN_ATTRS -__t1mskc_u64(unsigned long long a) +__t1mskc_u64(unsigned long long __a) { - return ~a | (a + 1); + return ~__a | (__a + 1); } static __inline__ unsigned long long __DEFAULT_FN_ATTRS -__tzmsk_u64(unsigned long long a) +__tzmsk_u64(unsigned long long __a) { - return ~a & (a - 1); + return ~__a & (__a - 1); } #endif diff --git a/c_headers/tgmath.h b/c_headers/tgmath.h index a48e267e6..318e1185f 100644 --- a/c_headers/tgmath.h +++ b/c_headers/tgmath.h @@ -490,7 +490,7 @@ static double _Complex static long double _Complex _TG_ATTRS - __tg_pow(long double _Complex __x, long double _Complex __y) + __tg_pow(long double _Complex __x, long double _Complex __y) {return cpowl(__x, __y);} #undef pow diff --git a/c_headers/tmmintrin.h b/c_headers/tmmintrin.h index 2ecc730e9..80664043a 100644 --- a/c_headers/tmmintrin.h +++ b/c_headers/tmmintrin.h @@ -20,203 +20,748 @@ * *===-----------------------------------------------------------------------=== */ - + #ifndef __TMMINTRIN_H #define __TMMINTRIN_H -#ifndef __SSSE3__ -#error "SSSE3 instruction set not enabled" -#else - #include /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3"))) +/// \brief Computes the absolute value of each of the packed 8-bit signed +/// integers in the source operand and stores the 8-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSB instruction. +/// +/// \param __a +/// A 64-bit vector of [8 x i8]. +/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_abs_pi8(__m64 __a) { return (__m64)__builtin_ia32_pabsb((__v8qi)__a); } +/// \brief Computes the absolute value of each of the packed 8-bit signed +/// integers in the source operand and stores the 8-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSB instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_abs_epi8(__m128i __a) { return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a); } +/// \brief Computes the absolute value of each of the packed 16-bit signed +/// integers in the source operand and stores the 16-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16]. 
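
/*
 * The TBM helpers above are defined purely in terms of bit arithmetic
 * (e.g. __blsfill_u32(a) == a | (a - 1)).  A minimal sketch of that semantics
 * in portable C so it runs without TBM hardware or -mtbm; the lowercase
 * function names here are illustrative stand-ins, not the intrinsics.
 */
#include <stdio.h>

static unsigned blsfill(unsigned a) { return a | (a - 1u); }  /* fill from lowest set bit down */
static unsigned tzmsk(unsigned a)   { return ~a & (a - 1u); } /* mask of trailing zero bits    */
static unsigned blcmsk(unsigned a)  { return a ^ (a + 1u); }  /* mask up through lowest clear bit */

int main(void)
{
    unsigned a = 0xB8u;                                   /* 1011 1000 */
    printf("blsfill(0x%02X) = 0x%02X\n", a, blsfill(a));  /* 0xBF */
    printf("tzmsk(0x%02X)   = 0x%02X\n", a, tzmsk(a));    /* 0x07 */
    printf("blcmsk(0x%02X)  = 0x%02X\n", a, blcmsk(a));   /* 0x01 */
    return 0;
}
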
+/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_abs_pi16(__m64 __a) { return (__m64)__builtin_ia32_pabsw((__v4hi)__a); } +/// \brief Computes the absolute value of each of the packed 16-bit signed +/// integers in the source operand and stores the 16-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_abs_epi16(__m128i __a) { return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a); } +/// \brief Computes the absolute value of each of the packed 32-bit signed +/// integers in the source operand and stores the 32-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. +/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_abs_pi32(__m64 __a) { return (__m64)__builtin_ia32_pabsd((__v2si)__a); } +/// \brief Computes the absolute value of each of the packed 32-bit signed +/// integers in the source operand and stores the 32-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_abs_epi32(__m128i __a) { return (__m128i)__builtin_ia32_pabsd128((__v4si)__a); } +/// \brief Concatenates the two 128-bit integer vector operands, and +/// right-shifts the result by the number of bytes specified in the immediate +/// operand. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_alignr_epi8(__m128i a, __m128i b, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the \c PALIGNR instruction. +/// +/// \param a +/// A 128-bit vector of [16 x i8] containing one of the source operands. +/// \param b +/// A 128-bit vector of [16 x i8] containing one of the source operands. +/// \param n +/// An immediate operand specifying how many bytes to right-shift the result. +/// \returns A 128-bit integer vector containing the concatenated right-shifted +/// value. #define _mm_alignr_epi8(a, b, n) __extension__ ({ \ - __m128i __a = (a); \ - __m128i __b = (b); \ - (__m128i)__builtin_ia32_palignr128((__v16qi)__a, (__v16qi)__b, (n)); }) + (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), (n)); }) +/// \brief Concatenates the two 64-bit integer vector operands, and right-shifts +/// the result by the number of bytes specified in the immediate operand. +/// +/// \headerfile +/// +/// \code +/// __m64 _mm_alignr_pi8(__m64 a, __m64 b, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the \c PALIGNR instruction. +/// +/// \param a +/// A 64-bit vector of [8 x i8] containing one of the source operands. +/// \param b +/// A 64-bit vector of [8 x i8] containing one of the source operands. +/// \param n +/// An immediate operand specifying how many bytes to right-shift the result. 
+/// \returns A 64-bit integer vector containing the concatenated right-shifted +/// value. #define _mm_alignr_pi8(a, b, n) __extension__ ({ \ - __m64 __a = (a); \ - __m64 __b = (b); \ - (__m64)__builtin_ia32_palignr((__v8qi)__a, (__v8qi)__b, (n)); }) + (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)); }) +/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed +/// 128-bit vectors of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal sums of +/// both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_hadd_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed +/// 128-bit vectors of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [4 x i32] containing the horizontal sums of +/// both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_hadd_epi32(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b); } +/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed +/// 64-bit vectors of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal sums of both +/// operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_hadd_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b); } +/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed +/// 64-bit vectors of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 64-bit vector of [2 x i32] containing the horizontal sums of both +/// operands. 
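
/*
 * Usage sketch for _mm_hadd_epi16 as documented above: adjacent pairs from
 * each operand are summed, with __a's sums in the low half of the result and
 * __b's in the high half.  Assumes an x86 build with SSSE3 enabled
 * (e.g. -mssse3); not part of the header itself.
 */
#include <tmmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
    __m128i b = _mm_setr_epi16(10, 20, 30, 40, 50, 60, 70, 80);
    __m128i r = _mm_hadd_epi16(a, b);

    short out[8];
    _mm_storeu_si128((__m128i *)out, r);
    for (int i = 0; i < 8; ++i)
        printf("%d ", out[i]);        /* 3 7 11 15 30 70 110 150 */
    printf("\n");
    return 0;
}
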
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_hadd_pi32(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b); } +/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed +/// 128-bit vectors of [8 x i16]. Positive sums greater than 7FFFh are +/// saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated +/// sums of both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_hadds_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed +/// 64-bit vectors of [4 x i16]. Positive sums greater than 7FFFh are +/// saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated +/// sums of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_hadds_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 128-bit vectors of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal differences +/// of both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_hsub_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 128-bit vectors of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. 
+/// \returns A 128-bit vector of [4 x i32] containing the horizontal differences +/// of both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_hsub_epi32(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 64-bit vectors of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal differences +/// of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_hsub_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 64-bit vectors of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 64-bit vector of [2 x i32] containing the horizontal differences +/// of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_hsub_pi32(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 128-bit vectors of [8 x i16]. Positive differences greater than +/// 7FFFh are saturated to 7FFFh. Negative differences less than 8000h are +/// saturated to 8000h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated +/// differences of both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_hsubs_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 64-bit vectors of [4 x i16]. Positive differences greater than +/// 7FFFh are saturated to 7FFFh. Negative differences less than 8000h are +/// saturated to 8000h. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. 
+/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated +/// differences of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_hsubs_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b); } +/// \brief Multiplies corresponding pairs of packed 8-bit unsigned integer +/// values contained in the first source operand and packed 8-bit signed +/// integer values contained in the second source operand, adds pairs of +/// contiguous products with signed saturation, and writes the 16-bit sums to +/// the corresponding bits in the destination. For example, bits [7:0] of +/// both operands are multiplied, bits [15:8] of both operands are +/// multiplied, and the sum of both results is written to bits [15:0] of the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMADDUBSW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the first source operand. +/// \param __b +/// A 128-bit integer vector containing the second source operand. +/// \returns A 128-bit integer vector containing the sums of products of both +/// operands: \n +/// \a R0 := (\a __a0 * \a __b0) + (\a __a1 * \a __b1) \n +/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n +/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n +/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7) \n +/// \a R4 := (\a __a8 * \a __b8) + (\a __a9 * \a __b9) \n +/// \a R5 := (\a __a10 * \a __b10) + (\a __a11 * \a __b11) \n +/// \a R6 := (\a __a12 * \a __b12) + (\a __a13 * \a __b13) \n +/// \a R7 := (\a __a14 * \a __b14) + (\a __a15 * \a __b15) static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_maddubs_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b); } +/// \brief Multiplies corresponding pairs of packed 8-bit unsigned integer +/// values contained in the first source operand and packed 8-bit signed +/// integer values contained in the second source operand, adds pairs of +/// contiguous products with signed saturation, and writes the 16-bit sums to +/// the corresponding bits in the destination. For example, bits [7:0] of +/// both operands are multiplied, bits [15:8] of both operands are +/// multiplied, and the sum of both results is written to bits [15:0] of the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PMADDUBSW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the first source operand. +/// \param __b +/// A 64-bit integer vector containing the second source operand. +/// \returns A 64-bit integer vector containing the sums of products of both +/// operands: \n +/// \a R0 := (\a __a0 * \a __b0) + (\a __a1 * \a __b1) \n +/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n +/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n +/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7) static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_maddubs_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b); } +/// \brief Multiplies packed 16-bit signed integer values, truncates the 32-bit +/// products to the 18 most significant bits by right-shifting, rounds the +/// truncated value by adding 1, and writes bits [16:1] to the destination. 
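
/*
 * Usage sketch for _mm_maddubs_epi16 as described above: unsigned bytes from
 * the first operand are multiplied by signed bytes from the second, and each
 * adjacent pair of 16-bit products is added with signed saturation.  Assumes
 * an x86 build with -mssse3; illustrative only.
 */
#include <tmmintrin.h>
#include <stdio.h>

int main(void)
{
    /* first operand: unsigned 8-bit values */
    __m128i u = _mm_setr_epi8(1, 2, 3, 4, 5, 6, 7, 8,
                              9, 10, 11, 12, 13, 14, 15, 16);
    /* second operand: signed 8-bit coefficients */
    __m128i s = _mm_setr_epi8(1, -1, 1, -1, 1, -1, 1, -1,
                              2, -2, 2, -2, 2, -2, 2, -2);
    __m128i r = _mm_maddubs_epi16(u, s);

    short out[8];
    _mm_storeu_si128((__m128i *)out, r);
    for (int i = 0; i < 8; ++i)
        printf("%d ", out[i]);        /* -1 -1 -1 -1 -2 -2 -2 -2 */
    printf("\n");
    return 0;
}
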
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHRSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. +/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled +/// products of both operands. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b); } +/// \brief Multiplies packed 16-bit signed integer values, truncates the 32-bit +/// products to the 18 most significant bits by right-shifting, rounds the +/// truncated value by adding 1, and writes bits [16:1] to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PMULHRSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. +/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled +/// products of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_mulhrs_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b); } +/// \brief Copies the 8-bit integers from a 128-bit integer vector to the +/// destination or clears 8-bit values in the destination, as specified by +/// the second source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSHUFB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control bytes corresponding to +/// positions in the destination: +/// Bit 7: \n +/// 1: Clear the corresponding byte in the destination. \n +/// 0: Copy the selected source byte to the corresponding byte in the +/// destination. \n +/// Bits [6:4] Reserved. \n +/// Bits [3:0] select the source byte to be copied. +/// \returns A 128-bit integer vector containing the copied or cleared values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_shuffle_epi8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b); } +/// \brief Copies the 8-bit integers from a 64-bit integer vector to the +/// destination or clears 8-bit values in the destination, as specified by +/// the second source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSHUFB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing control bytes corresponding to +/// positions in the destination: +/// Bit 7: \n +/// 1: Clear the corresponding byte in the destination. \n +/// 0: Copy the selected source byte to the corresponding byte in the +/// destination. \n +/// Bits [3:0] select the source byte to be copied. +/// \returns A 64-bit integer vector containing the copied or cleared values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_shuffle_pi8(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b); } +/// \brief For each 8-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand: If the +/// byte in the second source is negative, calculate the two's complement of +/// the corresponding byte in the first source, and write that value to the +/// destination. 
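
/*
 * Usage sketch for _mm_shuffle_epi8 (PSHUFB) as documented above: each control
 * byte selects a source byte via its low 4 bits, and a set bit 7 zeroes the
 * destination byte.  Reversing the 16 bytes of a vector is the classic use.
 * Assumes an x86 build with -mssse3; illustrative only.
 */
#include <tmmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i src = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                8, 9, 10, 11, 12, 13, 14, 15);
    /* indices 15..0 reverse the byte order */
    __m128i rev = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                                7, 6, 5, 4, 3, 2, 1, 0);
    __m128i r = _mm_shuffle_epi8(src, rev);

    unsigned char out[16];
    _mm_storeu_si128((__m128i *)out, r);
    for (int i = 0; i < 16; ++i)
        printf("%d ", out[i]);        /* 15 14 13 ... 1 0 */
    printf("\n");
    return 0;
}
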
If the byte in the second source is positive, copy the +/// corresponding byte from the first source to the destination. If the byte +/// in the second source is zero, clear the corresponding byte in the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control bytes corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sign_epi8(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b); } +/// \brief For each 16-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand: If the +/// word in the second source is negative, calculate the two's complement of +/// the corresponding word in the first source, and write that value to the +/// destination. If the word in the second source is positive, copy the +/// corresponding word from the first source to the destination. If the word +/// in the second source is zero, clear the corresponding word in the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control words corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sign_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b); } +/// \brief For each 32-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand: If the +/// doubleword in the second source is negative, calculate the two's +/// complement of the corresponding word in the first source, and write that +/// value to the destination. If the doubleword in the second source is +/// positive, copy the corresponding word from the first source to the +/// destination. If the doubleword in the second source is zero, clear the +/// corresponding word in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGND instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control doublewords corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sign_epi32(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b); } +/// \brief For each 8-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand: If the +/// byte in the second source is negative, calculate the two's complement of +/// the corresponding byte in the first source, and write that value to the +/// destination. If the byte in the second source is positive, copy the +/// corresponding byte from the first source to the destination. If the byte +/// in the second source is zero, clear the corresponding byte in the +/// destination. 
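
/*
 * Usage sketch for the _mm_sign_* family described above: each element of the
 * first operand is negated, kept, or zeroed depending on whether the matching
 * element of the second operand is negative, positive, or zero.  Shown for
 * 16-bit lanes; assumes an x86 build with -mssse3.
 */
#include <tmmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i values = _mm_setr_epi16(10, 20, 30, 40, 50, 60, 70, 80);
    __m128i signs  = _mm_setr_epi16(-1,  1,  0, -5,  7,  0, -2,  3);
    __m128i r = _mm_sign_epi16(values, signs);

    short out[8];
    _mm_storeu_si128((__m128i *)out, r);
    for (int i = 0; i < 8; ++i)
        printf("%d ", out[i]);        /* -10 20 0 -40 50 0 -70 80 */
    printf("\n");
    return 0;
}
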
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGNB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing control bytes corresponding to +/// positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sign_pi8(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b); } +/// \brief For each 16-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand: If the +/// word in the second source is negative, calculate the two's complement of +/// the corresponding word in the first source, and write that value to the +/// destination. If the word in the second source is positive, copy the +/// corresponding word from the first source to the destination. If the word +/// in the second source is zero, clear the corresponding word in the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGNW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing control words corresponding to +/// positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sign_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b); } +/// \brief For each 32-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand: If the +/// doubleword in the second source is negative, calculate the two's +/// complement of the corresponding doubleword in the first source, and +/// write that value to the destination. If the doubleword in the second +/// source is positive, copy the corresponding doubleword from the first +/// source to the destination. If the doubleword in the second source is +/// zero, clear the corresponding doubleword in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGND instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing two control doublewords corresponding +/// to positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. 
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sign_pi32(__m64 __a, __m64 __b) { @@ -225,6 +770,4 @@ _mm_sign_pi32(__m64 __a, __m64 __b) #undef __DEFAULT_FN_ATTRS -#endif /* __SSSE3__ */ - #endif /* __TMMINTRIN_H */ diff --git a/c_headers/unwind.h b/c_headers/unwind.h index 303d79288..4f74a3478 100644 --- a/c_headers/unwind.h +++ b/c_headers/unwind.h @@ -79,6 +79,10 @@ struct _Unwind_Context; struct _Unwind_Exception; typedef enum { _URC_NO_REASON = 0, +#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \ + !defined(__ARM_DWARF_EH__) + _URC_OK = 0, /* used by ARM EHABI */ +#endif _URC_FOREIGN_EXCEPTION_CAUGHT = 1, _URC_FATAL_PHASE2_ERROR = 2, @@ -88,7 +92,11 @@ typedef enum { _URC_END_OF_STACK = 5, _URC_HANDLER_FOUND = 6, _URC_INSTALL_CONTEXT = 7, - _URC_CONTINUE_UNWIND = 8 + _URC_CONTINUE_UNWIND = 8, +#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \ + !defined(__ARM_DWARF_EH__) + _URC_FAILURE = 9 /* used by ARM EHABI */ +#endif } _Unwind_Reason_Code; typedef enum { @@ -150,6 +158,15 @@ typedef enum { _UVRSR_FAILED = 2 } _Unwind_VRS_Result; +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__ARM_DWARF_EH__) +typedef uint32_t _Unwind_State; +#define _US_VIRTUAL_UNWIND_FRAME ((_Unwind_State)0) +#define _US_UNWIND_FRAME_STARTING ((_Unwind_State)1) +#define _US_UNWIND_FRAME_RESUME ((_Unwind_State)2) +#define _US_ACTION_MASK ((_Unwind_State)3) +#define _US_FORCE_UNWIND ((_Unwind_State)8) +#endif + _Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *__context, _Unwind_VRS_RegClass __regclass, uint32_t __regno, diff --git a/c_headers/wmmintrin.h b/c_headers/wmmintrin.h index 369e3c208..a2d931010 100644 --- a/c_headers/wmmintrin.h +++ b/c_headers/wmmintrin.h @@ -26,17 +26,8 @@ #include -#if !defined (__AES__) && !defined (__PCLMUL__) -# error "AES/PCLMUL instructions not enabled" -#else - -#ifdef __AES__ #include <__wmmintrin_aes.h> -#endif /* __AES__ */ -#ifdef __PCLMUL__ #include <__wmmintrin_pclmul.h> -#endif /* __PCLMUL__ */ -#endif /* __AES__ || __PCLMUL__ */ #endif /* _WMMINTRIN_H */ diff --git a/c_headers/x86intrin.h b/c_headers/x86intrin.h index 21a43daf3..81a404f55 100644 --- a/c_headers/x86intrin.h +++ b/c_headers/x86intrin.h @@ -28,54 +28,58 @@ #include -#ifdef __3dNOW__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__3dNOW__) #include #endif -#ifdef __BMI__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) #include #endif -#ifdef __BMI2__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__) #include #endif -#ifdef __LZCNT__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__) #include #endif -#ifdef __POPCNT__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__) #include #endif -#ifdef __RDSEED__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__) #include #endif -#ifdef __PRFCHW__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PRFCHW__) #include #endif -#ifdef __SSE4A__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE4A__) #include #endif -#ifdef __FMA4__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA4__) #include #endif -#ifdef __XOP__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XOP__) #include #endif -#ifdef __TBM__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__TBM__) #include #endif -#ifdef __F16C__ +#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__) #include #endif +#if !defined(_MSC_VER) || __has_feature(modules) || 
defined(__MWAITX__) +#include +#endif + /* FIXME: LWP */ #endif /* __X86INTRIN_H */ diff --git a/c_headers/xmmintrin.h b/c_headers/xmmintrin.h index 0d58c7530..dc31b85cf 100644 --- a/c_headers/xmmintrin.h +++ b/c_headers/xmmintrin.h @@ -20,13 +20,9 @@ * *===-----------------------------------------------------------------------=== */ - + #ifndef __XMMINTRIN_H #define __XMMINTRIN_H - -#ifndef __SSE__ -#error "SSE instruction set not enabled" -#else #include @@ -34,6 +30,9 @@ typedef int __v4si __attribute__((__vector_size__(16))); typedef float __v4sf __attribute__((__vector_size__(16))); typedef float __m128 __attribute__((__vector_size__(16))); +/* Unsigned types */ +typedef unsigned int __v4su __attribute__((__vector_size__(16))); + /* This header should only be included in a hosted environment as it depends on * a standard library to provide allocation routines. */ #if __STDC_HOSTED__ @@ -41,8 +40,23 @@ typedef float __m128 __attribute__((__vector_size__(16))); #endif /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse"))) +/// \brief Adds the 32-bit float values in the low-order bits of the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSS / ADDSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the sum +/// of the lower 32 bits of both operands. The upper 96 bits are copied from +/// the upper 96 bits of the first source operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_add_ss(__m128 __a, __m128 __b) { @@ -50,12 +64,41 @@ _mm_add_ss(__m128 __a, __m128 __b) return __a; } +/// \brief Adds two 128-bit vectors of [4 x float], and returns the results of +/// the addition. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPS / ADDPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the sums of both +/// operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_add_ps(__m128 __a, __m128 __b) { - return __a + __b; + return (__m128)((__v4sf)__a + (__v4sf)__b); } +/// \brief Subtracts the 32-bit float value in the low-order bits of the second +/// operand from the corresponding value in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBSS / SUBSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits +/// of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing the subtrahend. The lower 32 +/// bits of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// difference of the lower 32 bits of both operands. The upper 96 bits are +/// copied from the upper 96 bits of the first source operand. 
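
/*
 * Usage sketch contrasting the scalar and packed SSE adds documented above:
 * _mm_add_ss only touches lane 0 (the upper three floats come from the first
 * operand), while _mm_add_ps adds all four lanes.  Assumes an x86 build with
 * SSE enabled; illustrative only.
 */
#include <xmmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
    __m128 b = _mm_setr_ps(10.0f, 20.0f, 30.0f, 40.0f);

    float ss[4], ps[4];
    _mm_storeu_ps(ss, _mm_add_ss(a, b));   /* 11  2  3  4 */
    _mm_storeu_ps(ps, _mm_add_ps(a, b));   /* 11 22 33 44 */

    printf("add_ss: %g %g %g %g\n", ss[0], ss[1], ss[2], ss[3]);
    printf("add_ps: %g %g %g %g\n", ps[0], ps[1], ps[2], ps[3]);
    return 0;
}
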
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sub_ss(__m128 __a, __m128 __b) { @@ -63,12 +106,42 @@ _mm_sub_ss(__m128 __a, __m128 __b) return __a; } +/// \brief Subtracts each of the values of the second operand from the first +/// operand, both of which are 128-bit vectors of [4 x float] and returns +/// the results of the subtraction. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPS / SUBPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the minuend. +/// \param __b +/// A 128-bit vector of [4 x float] containing the subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the differences between +/// both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sub_ps(__m128 __a, __m128 __b) { - return __a - __b; + return (__m128)((__v4sf)__a - (__v4sf)__b); } +/// \brief Multiplies two 32-bit float values in the low-order bits of the +/// operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULSS / MULSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the product of the lower +/// 32 bits of both operands. The upper 96 bits are copied from the upper 96 +/// bits of the first source operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mul_ss(__m128 __a, __m128 __b) { @@ -76,12 +149,41 @@ _mm_mul_ss(__m128 __a, __m128 __b) return __a; } +/// \brief Multiplies two 128-bit vectors of [4 x float] and returns the +/// results of the multiplication. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPS / MULPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the products of both +/// operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mul_ps(__m128 __a, __m128 __b) { - return __a * __b; + return (__m128)((__v4sf)__a * (__v4sf)__b); } +/// \brief Divides the value in the low-order 32 bits of the first operand by +/// the corresponding value in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVSS / DIVSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the dividend. The lower 32 +/// bits of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing the divisor. The lower 32 bits +/// of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the quotients of the +/// lower 32 bits of both operands. The upper 96 bits are copied from the +/// upper 96 bits of the first source operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_div_ss(__m128 __a, __m128 __b) { @@ -89,329 +191,1109 @@ _mm_div_ss(__m128 __a, __m128 __b) return __a; } +/// \brief Divides two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPS / DIVPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the dividend. +/// \param __b +/// A 128-bit vector of [4 x float] containing the divisor. 
+/// \returns A 128-bit vector of [4 x float] containing the quotients of both +/// operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_div_ps(__m128 __a, __m128 __b) { - return __a / __b; + return (__m128)((__v4sf)__a / (__v4sf)__b); } +/// \brief Calculates the square root of the value stored in the low-order bits +/// of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTSS / SQRTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the square root of the +/// value in the low-order bits of the operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ss(__m128 __a) { - __m128 __c = __builtin_ia32_sqrtss(__a); + __m128 __c = __builtin_ia32_sqrtss((__v4sf)__a); return (__m128) { __c[0], __a[1], __a[2], __a[3] }; } +/// \brief Calculates the square roots of the values stored in a 128-bit vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPS / SQRTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the square roots of the +/// values in the operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ps(__m128 __a) { - return __builtin_ia32_sqrtps(__a); + return __builtin_ia32_sqrtps((__v4sf)__a); } +/// \brief Calculates the approximate reciprocal of the value stored in the +/// low-order bits of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPSS / RCPSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocal of the value in the low-order bits of the operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rcp_ss(__m128 __a) { - __m128 __c = __builtin_ia32_rcpss(__a); + __m128 __c = __builtin_ia32_rcpss((__v4sf)__a); return (__m128) { __c[0], __a[1], __a[2], __a[3] }; } +/// \brief Calculates the approximate reciprocals of the values stored in a +/// 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPPS / RCPPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocals of the values in the operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rcp_ps(__m128 __a) { - return __builtin_ia32_rcpps(__a); + return __builtin_ia32_rcpps((__v4sf)__a); } +/// \brief Calculates the approximate reciprocal of the square root of the value +/// stored in the low-order bits of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTSS / RSQRTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocal of the square root of the value in the low-order bits of the +/// operand. 
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rsqrt_ss(__m128 __a) { - __m128 __c = __builtin_ia32_rsqrtss(__a); + __m128 __c = __builtin_ia32_rsqrtss((__v4sf)__a); return (__m128) { __c[0], __a[1], __a[2], __a[3] }; } +/// \brief Calculates the approximate reciprocals of the square roots of the +/// values stored in a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTPS / RSQRTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocals of the square roots of the values in the operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rsqrt_ps(__m128 __a) { - return __builtin_ia32_rsqrtps(__a); + return __builtin_ia32_rsqrtps((__v4sf)__a); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands and returns the lesser value in the low-order bits of the +/// vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINSS / MINSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// minimum value between both operands. The upper 96 bits are copied from +/// the upper 96 bits of the first source operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_min_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_minss(__a, __b); + return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 128-bit vectors of [4 x float] and returns the lesser +/// of each pair of values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPS / MINPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \returns A 128-bit vector of [4 x float] containing the minimum values +/// between both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_min_ps(__m128 __a, __m128 __b) { - return __builtin_ia32_minps(__a, __b); + return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands and returns the greater value in the low-order bits of a 128-bit +/// vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXSS / MAXSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// maximum value between both operands. The upper 96 bits are copied from +/// the upper 96 bits of the first source operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_max_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_maxss(__a, __b); + return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 128-bit vectors of [4 x float] and returns the greater +/// of each pair of values. 
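
/*
 * The _mm_rcp_ps / _mm_rsqrt_ps intrinsics above return roughly 12-bit
 * approximations.  A common pattern (sketched here, assuming SSE on x86)
 * is one Newton-Raphson step to sharpen the reciprocal square root:
 *   y' = y * (1.5 - 0.5 * x * y * y)
 */
#include <xmmintrin.h>
#include <stdio.h>

static __m128 rsqrt_refined(__m128 x)
{
    __m128 y     = _mm_rsqrt_ps(x);        /* rough 1/sqrt(x) */
    __m128 half  = _mm_set1_ps(0.5f);
    __m128 three = _mm_set1_ps(3.0f);
    /* 0.5 * y * (3 - x*y*y) is an equivalent form of the step above */
    __m128 yy    = _mm_mul_ps(y, y);
    __m128 t     = _mm_sub_ps(three, _mm_mul_ps(x, yy));
    return _mm_mul_ps(_mm_mul_ps(half, y), t);
}

int main(void)
{
    float out[4];
    __m128 x = _mm_setr_ps(1.0f, 4.0f, 9.0f, 16.0f);
    _mm_storeu_ps(out, rsqrt_refined(x));
    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);  /* ~1 0.5 0.3333 0.25 */
    return 0;
}
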
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPS / MAXPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \returns A 128-bit vector of [4 x float] containing the maximum values +/// between both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_max_ps(__m128 __a, __m128 __b) { - return __builtin_ia32_maxps(__a, __b); + return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b); } +/// \brief Performs a bitwise AND of two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPS / ANDPS instructions. +/// +/// \param __a +/// A 128-bit vector containing one of the source operands. +/// \param __b +/// A 128-bit vector containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the +/// values between both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_and_ps(__m128 __a, __m128 __b) { - return (__m128)((__v4si)__a & (__v4si)__b); + return (__m128)((__v4su)__a & (__v4su)__b); } +/// \brief Performs a bitwise AND of two 128-bit vectors of [4 x float], using +/// the one's complement of the values contained in the first source +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPS / ANDNPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the first source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 128-bit vector of [4 x float] containing the second source operand. +/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the +/// one's complement of the first operand and the values in the second +/// operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_andnot_ps(__m128 __a, __m128 __b) { - return (__m128)(~(__v4si)__a & (__v4si)__b); + return (__m128)(~(__v4su)__a & (__v4su)__b); } +/// \brief Performs a bitwise OR of two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPS / ORPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the bitwise OR of the +/// values between both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_or_ps(__m128 __a, __m128 __b) { - return (__m128)((__v4si)__a | (__v4si)__b); + return (__m128)((__v4su)__a | (__v4su)__b); } +/// \brief Performs a bitwise exclusive OR of two 128-bit vectors of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the bitwise exclusive OR +/// of the values between both operands. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_xor_ps(__m128 __a, __m128 __b) { - return (__m128)((__v4si)__a ^ (__v4si)__b); + return (__m128)((__v4su)__a ^ (__v4su)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands for equality and returns the result of the comparison in the +/// low-order bits of a vector [4 x float]. 
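
/*
 * The bitwise float ops above (_mm_and_ps, _mm_andnot_ps, _mm_xor_ps) operate
 * on the raw bit patterns, which is how branch-free abs/negate are usually
 * written.  A minimal sketch assuming SSE on x86; -0.0f carries only the sign
 * bit, so it serves as the mask.
 */
#include <xmmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128 signbit = _mm_set1_ps(-0.0f);
    __m128 v = _mm_setr_ps(-1.5f, 2.0f, -0.25f, 8.0f);

    float abs_out[4], neg_out[4];
    _mm_storeu_ps(abs_out, _mm_andnot_ps(signbit, v));  /* clear sign bit -> |v| */
    _mm_storeu_ps(neg_out, _mm_xor_ps(signbit, v));     /* flip sign bit  -> -v  */

    printf("abs: %g %g %g %g\n", abs_out[0], abs_out[1], abs_out[2], abs_out[3]);
    printf("neg: %g %g %g %g\n", neg_out[0], neg_out[1], neg_out[2], neg_out[3]);
    return 0;
}
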
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQSS / CMPEQSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpeq_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpeqss(__a, __b); + return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] for equality. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQPS / CMPEQPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpeq_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpeqps(__a, __b); + return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is less than the +/// corresponding value in the second operand and returns the result of the +/// comparison in the low-order bits of a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSS / CMPLTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmplt_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpltss(__a, __b); + return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are less than those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmplt_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpltps(__a, __b); + return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is less than or +/// equal to the corresponding value in the second operand and returns the +/// result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESS / CMPLESS instructions. 
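
/*
 * The packed comparisons above return all-ones or all-zero lanes rather than
 * a bool, so they are normally combined with the bitwise ops to select
 * elements without branches.  A sketch of a per-lane "pick the smaller value"
 * built from _mm_cmplt_ps; assumes SSE on x86 and, unlike _mm_min_ps, makes
 * the mask explicit.
 */
#include <xmmintrin.h>
#include <stdio.h>

static __m128 select_smaller(__m128 a, __m128 b)
{
    __m128 mask = _mm_cmplt_ps(a, b);              /* lanes where a < b   */
    return _mm_or_ps(_mm_and_ps(mask, a),          /* take a where a < b  */
                     _mm_andnot_ps(mask, b));      /* otherwise take b    */
}

int main(void)
{
    float out[4];
    __m128 a = _mm_setr_ps(1.0f, 5.0f, -3.0f, 7.0f);
    __m128 b = _mm_setr_ps(2.0f, 4.0f, -6.0f, 7.0f);
    _mm_storeu_ps(out, select_smaller(a, b));
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 1 4 -6 7 */
    return 0;
}
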
+/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmple_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpless(__a, __b); + return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are less than or equal to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmple_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpleps(__a, __b); + return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is greater than +/// the corresponding value in the second operand and returns the result of +/// the comparison in the low-order bits of a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSS / CMPLTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpgt_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_shufflevector(__a, - __builtin_ia32_cmpltss(__b, __a), + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a), 4, 1, 2, 3); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are greater than those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpgt_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpltps(__b, __a); + return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is greater than +/// or equal to the corresponding value in the second operand and returns +/// the result of the comparison in the low-order bits of a vector of +/// [4 x float]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESS / CMPLESS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpge_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_shufflevector(__a, - __builtin_ia32_cmpless(__b, __a), + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a), 4, 1, 2, 3); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are greater than or equal to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpge_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpleps(__b, __a); + return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands for inequality and returns the result of the comparison in the +/// low-order bits of a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQSS / CMPNEQSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpneq_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpneqss(__a, __b); + return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] for inequality. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQPS / CMPNEQPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpneq_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpneqps(__a, __b); + return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not less than +/// the corresponding value in the second operand and returns the result of +/// the comparison in the low-order bits of a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSS / CMPNLTSS +/// instructions. 
+/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnlt_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpnltss(__a, __b); + return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not less than those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnlt_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpnltps(__a, __b); + return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not less than +/// or equal to the corresponding value in the second operand and returns +/// the result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESS / CMPNLESS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnle_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpnless(__a, __b); + return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not less than or equal to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnle_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpnleps(__a, __b); + return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not greater +/// than the corresponding value in the second operand and returns the +/// result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSS / CMPNLTSS +/// instructions. 
+/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpngt_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_shufflevector(__a, - __builtin_ia32_cmpnltss(__b, __a), + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__b, (__v4sf)__a), 4, 1, 2, 3); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not greater than those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpngt_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpnltps(__b, __a); + return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not greater +/// than or equal to the corresponding value in the second operand and +/// returns the result of the comparison in the low-order bits of a vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESS / CMPNLESS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnge_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_shufflevector(__a, - __builtin_ia32_cmpnless(__b, __a), + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpnless((__v4sf)__b, (__v4sf)__a), 4, 1, 2, 3); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not greater than or equal to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
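Editorial aside, not part of the vendored header diff: the packed comparison intrinsics above return a per-element mask of all ones or all zeros, which is normally combined with the bitwise [4 x float] operations declared earlier in this header. A minimal sketch under that assumption; the helper name select_max is hypothetical.

#include <xmmintrin.h>

/* Per-element maximum built from a _mm_cmplt_ps mask: each lane of the
   mask is all ones where a < b and all zeros otherwise. */
static __m128 select_max(__m128 a, __m128 b)
{
    __m128 mask = _mm_cmplt_ps(a, b);            /* a < b ? ~0 : 0, per lane */
    return _mm_or_ps(_mm_and_ps(mask, b),        /* take b where a < b       */
                     _mm_andnot_ps(mask, a));    /* take a elsewhere         */
}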
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnge_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpnleps(__b, __a); + return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is ordered with +/// respect to the corresponding value in the second operand and returns the +/// result of the comparison in the low-order bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDSS / CMPORDSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpord_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpordss(__a, __b); + return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are ordered with respect to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDPS / CMPORDPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpord_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpordps(__a, __b); + return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is unordered +/// with respect to the corresponding value in the second operand and +/// returns the result of the comparison in the low-order bits of a vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDSS / CMPUNORDSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpunord_ss(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpunordss(__a, __b); + return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are unordered with respect to those in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDPS / CMPUNORDPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. 
+/// \returns A 128-bit vector of [4 x float] containing the comparison results. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpunord_ps(__m128 __a, __m128 __b) { - return (__m128)__builtin_ia32_cmpunordps(__a, __b); + return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands for equality and returns the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_comieq(__a, __b); + return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is less than the second +/// operand and returns the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_comilt(__a, __b); + return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is less than or equal to the +/// second operand and returns the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_comile(__a, __b); + return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is greater than the second +/// operand and returns the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. 
static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_comigt(__a, __b); + return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is greater than or equal to +/// the second operand and returns the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_comige(__a, __b); + return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b); } +/// \brief Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is not equal to the second +/// operand and returns the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_comineq(__a, __b); + return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b); } +/// \brief Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine equality and returns +/// the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_ucomieq(__a, __b); + return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b); } +/// \brief Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// less than the second operand and returns the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. 
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_ucomilt(__a, __b); + return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b); } +/// \brief Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// less than or equal to the second operand and returns the result of the +/// comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_ucomile(__a, __b); + return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b); } +/// \brief Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// greater than the second operand and returns the result of the +/// comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_ucomigt(__a, __b); + return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b); } +/// \brief Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// greater than or equal to the second operand and returns the result of +/// the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_ucomige(__a, __b); + return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b); } +/// \brief Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine inequality and returns +/// the result of the comparison. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. 
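Illustrative only, not part of the diff: the COMISS/UCOMISS wrappers compare just the low elements and return a plain int (0 or 1). The helper name low_elements_equal is hypothetical.

#include <xmmintrin.h>

/* 1 if the low elements compare equal, 0 otherwise (also 0 if either is NaN).
   _mm_ucomieq_ss is the variant that stays quiet on QNaN inputs. */
static int low_elements_equal(__m128 a, __m128 b)
{
    return _mm_comieq_ss(a, b);
}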
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_ss(__m128 __a, __m128 __b) { - return __builtin_ia32_ucomineq(__a, __b); + return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b); } +/// \brief Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtss_si32(__m128 __a) { - return __builtin_ia32_cvtss2si(__a); + return __builtin_ia32_cvtss2si((__v4sf)__a); } +/// \brief Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. static __inline__ int __DEFAULT_FN_ATTRS _mm_cvt_ss2si(__m128 __a) { @@ -420,56 +1302,168 @@ _mm_cvt_ss2si(__m128 __a) #ifdef __x86_64__ +/// \brief Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 64-bit integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 64-bit integer containing the converted value. static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtss_si64(__m128 __a) { - return __builtin_ia32_cvtss2si64(__a); + return __builtin_ia32_cvtss2si64((__v4sf)__a); } #endif +/// \brief Converts two low-order float values in a 128-bit vector of +/// [4 x float] into a 64-bit vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtps_pi32(__m128 __a) { - return (__m64)__builtin_ia32_cvtps2pi(__a); + return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a); } +/// \brief Converts two low-order float values in a 128-bit vector of +/// [4 x float] into a 64-bit vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvt_ps2pi(__m128 __a) { return _mm_cvtps_pi32(__a); } +/// \brief Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer, truncating the result when it is +/// inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. 
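A small sketch, not part of the vendored header, contrasting the rounding conversion with the truncating one documented just above; cvt_demo is a hypothetical name.

#include <xmmintrin.h>

static void cvt_demo(void)
{
    __m128 v = _mm_set_ss(2.7f);
    int rounded   = _mm_cvtss_si32(v);   /* 3 under the default rounding mode */
    int truncated = _mm_cvttss_si32(v);  /* 2: the fraction is discarded      */
    (void)rounded;
    (void)truncated;
}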
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttss_si32(__m128 __a) { - return __a[0]; + return __builtin_ia32_cvttss2si((__v4sf)__a); } +/// \brief Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer, truncating the result when it is +/// inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtt_ss2si(__m128 __a) { return _mm_cvttss_si32(__a); } +#ifdef __x86_64__ +/// \brief Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 64-bit integer, truncating the result when it is +/// inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 64-bit integer containing the converted value. static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttss_si64(__m128 __a) { - return __a[0]; + return __builtin_ia32_cvttss2si64((__v4sf)__a); } +#endif +/// \brief Converts two low-order float values in a 128-bit vector of +/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result +/// when it is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPS2PI / VTTPS2PI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvttps_pi32(__m128 __a) { - return (__m64)__builtin_ia32_cvttps2pi(__a); + return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a); } +/// \brief Converts two low-order float values in a 128-bit vector of [4 x +/// float] into a 64-bit vector of [2 x i32], truncating the result when it +/// is inexact. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtt_ps2pi(__m128 __a) { return _mm_cvttps_pi32(__a); } +/// \brief Converts a 32-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. The remaining +/// higher order elements of the destination vector are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 32-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsi32_ss(__m128 __a, int __b) { @@ -477,6 +1471,22 @@ _mm_cvtsi32_ss(__m128 __a, int __b) return __a; } +/// \brief Converts a 32-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. 
The remaining +/// higher order elements of the destination are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 32-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvt_si2ss(__m128 __a, int __b) { @@ -485,6 +1495,22 @@ _mm_cvt_si2ss(__m128 __a, int __b) #ifdef __x86_64__ +/// \brief Converts a 64-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. The remaining +/// higher order elements of the destination are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsi64_ss(__m128 __a, long long __b) { @@ -494,24 +1520,84 @@ _mm_cvtsi64_ss(__m128 __a, long long __b) #endif +/// \brief Converts two elements of a 64-bit vector of [2 x i32] into two +/// floating point values and writes them to the lower 64-bits of the +/// destination. The remaining higher order elements of the destination are +/// copied from the corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit vector of [2 x i32]. The elements in this vector are converted +/// and written to the corresponding low-order elements in the destination. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted value of the second operand. The upper 64 bits are copied from +/// the upper 64 bits of the first operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpi32_ps(__m128 __a, __m64 __b) { - return __builtin_ia32_cvtpi2ps(__a, (__v2si)__b); + return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b); } +/// \brief Converts two elements of a 64-bit vector of [2 x i32] into two +/// floating point values and writes them to the lower 64-bits of the +/// destination. The remaining higher order elements of the destination are +/// copied from the corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit vector of [2 x i32]. The elements in this vector are converted +/// and written to the corresponding low-order elements in the destination. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted value from the second operand. The upper 64 bits are copied +/// from the upper 64 bits of the first operand. 
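Illustrative aside, not part of the diff: _mm_cvtpi32_ps mixes MMX and SSE state, so real code should call _mm_empty() before returning to x87 floating point. The helper name two_ints_to_floats is hypothetical.

#include <xmmintrin.h>

/* Packs two ints into a __m64 and converts them into the low two lanes of a
   [4 x float]; the upper lanes come from the first operand (zero here). */
static __m128 two_ints_to_floats(int lo, int hi)
{
    __m64  packed = _mm_set_pi32(hi, lo);
    __m128 result = _mm_cvtpi32_ps(_mm_setzero_ps(), packed);
    _mm_empty();   /* leave MMX state before any later x87 code */
    return result;
}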
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvt_pi2ps(__m128 __a, __m64 __b) { return _mm_cvtpi32_ps(__a, __b); } +/// \brief Extracts a float value contained in the lower 32 bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the extraction. +/// \returns A 32-bit float containing the extracted value. static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtss_f32(__m128 __a) { return __a[0]; } +/// \brief Loads two packed float values from the address \a __p into the +/// high-order bits of a 128-bit vector of [4 x float]. The low-order bits +/// are copied from the low-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0] +/// of the destination. +/// \param __p +/// A pointer to two packed float values. Bits [63:0] are written to bits +/// [127:64] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the moved values. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadh_pi(__m128 __a, const __m64 *__p) { @@ -524,6 +1610,21 @@ _mm_loadh_pi(__m128 __a, const __m64 *__p) return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5); } +/// \brief Loads two packed float values from the address \a __p into the +/// low-order bits of a 128-bit vector of [4 x float]. The high-order bits +/// are copied from the high-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. Bits [127:64] are written to bits +/// [127:64] of the destination. +/// \param __p +/// A pointer to two packed float values. Bits [63:0] are written to bits +/// [63:0] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the moved values. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadl_pi(__m128 __a, const __m64 *__p) { @@ -536,6 +1637,21 @@ _mm_loadl_pi(__m128 __a, const __m64 *__p) return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3); } +/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 32 bits of the vector are initialized with the single-precision +/// floating-point value loaded from a specified memory location. The upper +/// 96 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location containing a single-precision +/// floating-point value. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. The +/// lower 32 bits contain the value loaded from the memory location. The +/// upper 96 bits are set to zero. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_load_ss(const float *__p) { @@ -546,6 +1662,18 @@ _mm_load_ss(const float *__p) return (__m128){ __u, 0, 0, 0 }; } +/// \brief Loads a 32-bit float value and duplicates it to all four vector +/// elements of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a float value to be loaded and duplicated. +/// \returns A 128-bit vector of [4 x float] containing the loaded and +/// duplicated values. 
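A brief sketch, not part of the vendored header, showing the difference between the scalar load and the broadcast load documented above; load_demo is a hypothetical name.

#include <xmmintrin.h>

static void load_demo(const float *p)
{
    __m128 one = _mm_load_ss(p);    /* { p[0], 0, 0, 0 }          */
    __m128 all = _mm_load1_ps(p);   /* { p[0], p[0], p[0], p[0] } */
    (void)one;
    (void)all;
}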
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_load1_ps(const float *__p)
 {
@@ -558,12 +1686,34 @@ _mm_load1_ps(const float *__p)

 #define _mm_load_ps1(p) _mm_load1_ps(p)

+/// \brief Loads a 128-bit floating-point vector of [4 x float] from an aligned
+/// memory location.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 128-bit aligned.
+/// \returns A 128-bit vector of [4 x float] containing the loaded values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_load_ps(const float *__p)
 {
   return *(__m128*)__p;
 }

+/// \brief Loads a 128-bit floating-point vector of [4 x float] from an
+/// unaligned memory location.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns A 128-bit vector of [4 x float] containing the loaded values.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_loadu_ps(const float *__p)
 {
@@ -573,19 +1723,71 @@ _mm_loadu_ps(const float *__p)
   return ((struct __loadu_ps*)__p)->__v;
 }

+/// \brief Loads four packed float values, in reverse order, from an aligned
+/// memory location to 32-bit elements in a 128-bit vector of [4 x float].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VMOVAPS / MOVAPS + shuffling
+/// instruction.
+///
+/// \param __p
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 128-bit aligned.
+/// \returns A 128-bit vector of [4 x float] containing the moved values, loaded
+/// in reverse order.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_loadr_ps(const float *__p)
 {
   __m128 __a = _mm_load_ps(__p);
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+  return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
 }

+/// \brief Create a 128-bit vector of [4 x float] with undefined values.
+///
+/// \headerfile
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 128-bit vector of [4 x float] containing undefined values.
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_undefined_ps(void)
+{
+  return (__m128)__builtin_ia32_undef128();
+}
+
+/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// 32 bits of the vector are initialized with the specified single-precision
+/// floating-point value. The upper 96 bits are set to zero.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VMOVSS / MOVSS instruction.
+///
+/// \param __w
+/// A single-precision floating-point value used to initialize the lower 32
+/// bits of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float]. The
+/// lower 32 bits contain the value provided in the source operand. The
+/// upper 96 bits are set to zero.
 static __inline__ __m128 __DEFAULT_FN_ATTRS
 _mm_set_ss(float __w)
 {
   return (__m128){ __w, 0, 0, 0 };
 }

+/// \brief Constructs a 128-bit floating-point vector of [4 x float], with each
+/// of the four single-precision floating-point vector elements set to the
+/// specified single-precision floating-point value.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPERMILPS / PERMILPS instruction.
+///
+/// \param __w
+/// A single-precision floating-point value used to initialize each vector
+/// element of the result.
+/// \returns An initialized 128-bit floating-point vector of [4 x float]. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_set1_ps(float __w) { @@ -593,42 +1795,139 @@ _mm_set1_ps(float __w) } /* Microsoft specific. */ +/// \brief Constructs a 128-bit floating-point vector of [4 x float], with each +/// of the four single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS / PERMILPS instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_set_ps1(float __w) { return _mm_set1_ps(__w); } +/// \brief Constructs a 128-bit floating-point vector of [4 x float] +/// initialized with the specified single-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __z +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __y +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __x +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __w +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_set_ps(float __z, float __y, float __x, float __w) { return (__m128){ __w, __x, __y, __z }; } +/// \brief Constructs a 128-bit floating-point vector of [4 x float], +/// initialized in reverse order with the specified 32-bit single-precision +/// float-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __z +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \param __y +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __x +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __w +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_setr_ps(float __z, float __y, float __x, float __w) { return (__m128){ __z, __y, __x, __w }; } +/// \brief Constructs a 128-bit floating-point vector of [4 x float] initialized +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instruction. +/// +/// \returns An initialized 128-bit floating-point vector of [4 x float] with +/// all elements set to zero. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_setzero_ps(void) { return (__m128){ 0, 0, 0, 0 }; } +/// \brief Stores the upper 64 bits of a 128-bit vector of [4 x float] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPEXTRQ / MOVQ instruction. +/// +/// \param __p +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. 
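Illustrative only, not part of the diff: _mm_set_ps takes its arguments from the highest element down, while _mm_setr_ps takes them from the lowest element up, so the two calls below build the same vector. set_demo is a hypothetical name.

#include <xmmintrin.h>

static void set_demo(void)
{
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   /* lane 0 = 1.0f, lane 3 = 4.0f */
    __m128 b = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);  /* same layout, reversed order  */
    (void)a;
    (void)b;
}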
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pi(__m64 *__p, __m128 __a) { - __builtin_ia32_storehps((__v2si *)__p, __a); + __builtin_ia32_storehps((__v2si *)__p, (__v4sf)__a); } +/// \brief Stores the lower 64 bits of a 128-bit vector of [4 x float] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPS / MOVLPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pi(__m64 *__p, __m128 __a) { - __builtin_ia32_storelps((__v2si *)__p, __a); + __builtin_ia32_storelps((__v2si *)__p, (__v4sf)__a); } +/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] containing the value to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_store_ss(float *__p, __m128 __a) { @@ -638,35 +1937,101 @@ _mm_store_ss(float *__p, __m128 __a) ((struct __mm_store_ss_struct*)__p)->__u = __a[0]; } +/// \brief Stores a 128-bit vector of [4 x float] to an unaligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_ps(float *__p, __m128 __a) { - __builtin_ia32_storeups(__p, __a); -} - -static __inline__ void __DEFAULT_FN_ATTRS -_mm_store1_ps(float *__p, __m128 __a) -{ - __a = __builtin_shufflevector(__a, __a, 0, 0, 0, 0); - _mm_storeu_ps(__p, __a); -} - -static __inline__ void __DEFAULT_FN_ATTRS -_mm_store_ps1(float *__p, __m128 __a) -{ - return _mm_store1_ps(__p, __a); + struct __storeu_ps { + __m128 __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__p)->__v = __a; } +/// \brief Stores a 128-bit vector of [4 x float] into an aligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 16-byte aligned. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_store_ps(float *__p, __m128 __a) { - *(__m128 *)__p = __a; + *(__m128*)__p = __a; } +/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into +/// four contiguous elements in an aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each +/// of the four contiguous elements pointed by \a __p. 
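A minimal sketch, not part of the vendored header, of the aligned versus unaligned store forms; store_demo is a hypothetical name and the alignment attribute assumes a GCC/Clang compiler.

#include <xmmintrin.h>

static void store_demo(__m128 v)
{
    __attribute__((aligned(16))) float aligned_buf[4];
    float unaligned_buf[4];

    _mm_store_ps(aligned_buf, v);     /* address must be 16-byte aligned */
    _mm_storeu_ps(unaligned_buf, v);  /* any address is acceptable       */
}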
+static __inline__ void __DEFAULT_FN_ATTRS +_mm_store1_ps(float *__p, __m128 __a) +{ + __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 0, 0); + _mm_store_ps(__p, __a); +} + +/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into +/// four contiguous elements in an aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each +/// of the four contiguous elements pointed by \a __p. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_ps1(float *__p, __m128 __a) +{ + return _mm_store1_ps(__p, __a); +} + +/// \brief Stores float values from a 128-bit vector of [4 x float] to an +/// aligned memory location in reverse order. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 128-bit aligned. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_ps(float *__p, __m128 __a) { - __a = __builtin_shufflevector(__a, __a, 3, 2, 1, 0); + __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0); _mm_store_ps(__p, __a); } @@ -679,156 +2044,648 @@ _mm_storer_ps(float *__p, __m128 __a) /* FIXME: We have to #define this because "sel" must be a constant integer, and Sema doesn't do any form of constant propagation yet. */ +/// \brief Loads one cache line of data from the specified address to a location +/// closer to the processor. +/// +/// \headerfile +/// +/// \code +/// void _mm_prefetch(const void * a, const int sel); +/// \endcode +/// +/// This intrinsic corresponds to the PREFETCHNTA instruction. +/// +/// \param a +/// A pointer to a memory location containing a cache line of data. +/// \param sel +/// A predefined integer constant specifying the type of prefetch +/// operation: \n +/// _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint. The +/// PREFETCHNTA instruction will be generated. \n +/// _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will +/// be generated. \n +/// _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will +/// be generated. \n +/// _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will +/// be generated. #define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel))) #endif +/// \brief Stores a 64-bit integer in the specified aligned memory location. To +/// minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTQ instruction. +/// +/// \param __p +/// A pointer to an aligned memory location used to store the register value. +/// \param __a +/// A 64-bit integer containing the value to be stored. static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pi(__m64 *__p, __m64 __a) { __builtin_ia32_movntq(__p, __a); } +/// \brief Moves packed float values from a 128-bit vector of [4 x float] to a +/// 128-bit aligned memory location. To minimize caching, the data is flagged +/// as non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. 
+///
+/// \param __p
+/// A pointer to a 128-bit aligned memory location that will receive the
+/// float values.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be moved.
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_stream_ps(float *__p, __m128 __a)
 {
-  __builtin_ia32_movntps(__p, __a);
+  __builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p);
 }

-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_sfence(void)
-{
-  __builtin_ia32_sfence();
-}
+#if defined(__cplusplus)
+extern "C" {
+#endif

-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_extract_pi16(__m64 __a, int __n)
-{
-  __v4hi __b = (__v4hi)__a;
-  return (unsigned short)__b[__n & 3];
-}
+/// \brief Forces strong memory ordering (serialization) between store
+/// instructions preceding this instruction and store instructions following
+/// this instruction, ensuring the system completes all previous stores
+/// before executing subsequent stores.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the SFENCE instruction.
+///
+void _mm_sfence(void);

-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_insert_pi16(__m64 __a, int __d, int __n)
-{
-  __v4hi __b = (__v4hi)__a;
-  __b[__n & 3] = __d;
-  return (__m64)__b;
-}
+#if defined(__cplusplus)
+} // extern "C"
+#endif

+/// \brief Extracts a 16-bit element from a 64-bit vector of [4 x i16] and
+/// returns it, as specified by the immediate integer operand.
+///
+/// \headerfile
+///
+/// \code
+/// int _mm_extract_pi16(__m64 a, int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction.
+///
+/// \param a
+/// A 64-bit vector of [4 x i16].
+/// \param n
+/// An immediate integer operand that determines which bits are extracted: \n
+/// 0: Bits [15:0] are copied to the destination. \n
+/// 1: Bits [31:16] are copied to the destination. \n
+/// 2: Bits [47:32] are copied to the destination. \n
+/// 3: Bits [63:48] are copied to the destination.
+/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
+#define _mm_extract_pi16(a, n) __extension__ ({ \
+  (int)__builtin_ia32_vec_ext_v4hi((__m64)a, (int)n); })
+
+/// \brief Copies data from the 64-bit vector of [4 x i16] to the destination,
+/// and inserts the lower 16 bits of an integer operand at the 16-bit offset
+/// specified by the immediate operand \a n.
+///
+/// \headerfile
+///
+/// \code
+/// __m64 _mm_insert_pi16(__m64 a, int d, int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPINSRW / PINSRW instruction.
+///
+/// \param a
+/// A 64-bit vector of [4 x i16].
+/// \param d
+/// An integer. The lower 16-bit value from this operand is written to the
+/// destination at the offset specified by operand \a n.
+/// \param n
+/// An immediate integer operand that determines which bits in the
+/// destination are replaced: \n
+/// 0: Bits [15:0] are copied to the destination. \n
+/// 1: Bits [31:16] are copied to the destination. \n
+/// 2: Bits [47:32] are copied to the destination. \n
+/// 3: Bits [63:48] are copied to the destination. \n
+/// The remaining bits in the destination are copied from the corresponding
+/// bits in operand \a a.
+/// \returns A 64-bit integer vector containing the copied packed data from the
+/// operands.
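Illustrative only, not part of the diff: non-temporal stores are normally followed by a store fence so the data is globally visible before another agent reads it. The helper name stream_fill is hypothetical and assumes a 16-byte aligned buffer whose length is a multiple of four floats.

#include <xmmintrin.h>

static void stream_fill(float *dst, __m128 value, int count)
{
    for (int i = 0; i < count; i += 4)
        _mm_stream_ps(dst + i, value);   /* bypasses the cache (MOVNTPS) */
    _mm_sfence();                        /* order the streaming stores   */
}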
+#define _mm_insert_pi16(a, d, n) __extension__ ({ \ + (__m64)__builtin_ia32_vec_set_v4hi((__m64)a, (int)d, (int)n); }) + +/// \brief Compares each of the corresponding packed 16-bit integer values of +/// the 64-bit integer vectors, and writes the greater value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMAXSW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_max_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b); } +/// \brief Compares each of the corresponding packed 8-bit unsigned integer +/// values of the 64-bit integer vectors, and writes the greater value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMAXUB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_max_pu8(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b); } +/// \brief Compares each of the corresponding packed 16-bit integer values of +/// the 64-bit integer vectors, and writes the lesser value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMINSW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_min_pi16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b); } +/// \brief Compares each of the corresponding packed 8-bit unsigned integer +/// values of the 64-bit integer vectors, and writes the lesser value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMINUB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_min_pu8(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b); } +/// \brief Takes the most significant bit from each 8-bit element in a 64-bit +/// integer vector to create a 16-bit mask value. Zero-extends the value to +/// 32-bit integer and writes it to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMOVMSKB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values with bits to be extracted. +/// \returns The most significant bit from each 8-bit element in the operand, +/// written to bits [15:0]. 
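A short sketch, not part of the vendored header, of the extract/insert macros defined above; replace_third_word is a hypothetical name.

#include <xmmintrin.h>

/* The element index for _mm_extract_pi16/_mm_insert_pi16 must be an integer
   constant expression; callers should execute _mm_empty() before any
   subsequent x87 floating-point code. */
static __m64 replace_third_word(__m64 v, int x)
{
    int old = _mm_extract_pi16(v, 2);    /* read bits [47:32]                 */
    (void)old;
    return _mm_insert_pi16(v, x, 2);     /* write the low 16 bits of x there  */
}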
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pi8(__m64 __a) { return __builtin_ia32_pmovmskb((__v8qi)__a); } +/// \brief Multiplies packed 16-bit unsigned integer values and writes the +/// high-order 16 bits of each 32-bit product to the corresponding bits in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULHUW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the products of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_mulhi_pu16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b); } +/// \brief Shuffles the 4 16-bit integers from a 64-bit integer vector to the +/// destination, as specified by the immediate value operand. +/// +/// \headerfile +/// +/// \code +/// __m64 _mm_shuffle_pi16(__m64 a, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the PSHUFW instruction. +/// +/// \param a +/// A 64-bit integer vector containing the values to be shuffled. +/// \param n +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \a a. The destinations within the 64-bit destination are +/// assigned values as follows: \n +/// Bits [1:0] are used to assign values to bits [15:0] in the +/// destination. \n +/// Bits [3:2] are used to assign values to bits [31:16] in the +/// destination. \n +/// Bits [5:4] are used to assign values to bits [47:32] in the +/// destination. \n +/// Bits [7:6] are used to assign values to bits [63:48] in the +/// destination. \n +/// Bit value assignments: \n +/// 00: assigned from bits [15:0] of \a a. \n +/// 01: assigned from bits [31:16] of \a a. \n +/// 10: assigned from bits [47:32] of \a a. \n +/// 11: assigned from bits [63:48] of \a a. +/// \returns A 64-bit integer vector containing the shuffled values. #define _mm_shuffle_pi16(a, n) __extension__ ({ \ - __m64 __a = (a); \ - (__m64)__builtin_ia32_pshufw((__v4hi)__a, (n)); }) + (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)); }) +/// \brief Conditionally copies the values from each 8-bit element in the first +/// 64-bit integer vector operand to the specified memory location, as +/// specified by the most significant bit in the corresponding element in the +/// second 64-bit integer vector operand. To minimize caching, the data is +/// flagged as non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MASKMOVQ instruction. +/// +/// \param __d +/// A 64-bit integer vector containing the values with elements to be copied. +/// \param __n +/// A 64-bit integer vector operand. The most significant bit from each 8-bit +/// element determines whether the corresponding element in operand \a __d +/// is copied. If the most significant bit of a given element is 1, the +/// corresponding element in operand \a __d is copied. +/// \param __p +/// A pointer to a 64-bit memory location that will receive the conditionally +/// copied integer values. The address of the memory location does not have +/// to be aligned. 
static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmove_si64(__m64 __d, __m64 __n, char *__p) { __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p); } +/// \brief Computes the rounded averages of the packed unsigned 8-bit integer +/// values and writes the averages to the corresponding bits in the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAVGB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the averages of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_avg_pu8(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b); } +/// \brief Computes the rounded averages of the packed unsigned 16-bit integer +/// values and writes the averages to the corresponding bits in the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAVGW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the averages of both operands. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_avg_pu16(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b); } +/// \brief Subtracts the corresponding 8-bit unsigned integer values of the two +/// 64-bit vector operands and computes the absolute value for each of the +/// difference. Then sum of the 8 absolute differences is written to the +/// bits [15:0] of the destination; the remaining bits [63:16] are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSADBW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector whose lower 16 bits contain the sums of the +/// sets of absolute differences between both operands. The upper bits are +/// cleared. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sad_pu8(__m64 __a, __m64 __b) { return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b); } -static __inline__ unsigned int __DEFAULT_FN_ATTRS -_mm_getcsr(void) -{ - return __builtin_ia32_stmxcsr(); -} +#if defined(__cplusplus) +extern "C" { +#endif -static __inline__ void __DEFAULT_FN_ATTRS -_mm_setcsr(unsigned int __i) -{ - __builtin_ia32_ldmxcsr(__i); -} +/// \brief Returns the contents of the MXCSR register as a 32-bit unsigned +/// integer value. +/// +/// There are several groups of macros associated with this +/// intrinsic, including: +///
+/// <ul>
+/// <li>
+///    For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+///    _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+///    _MM_EXCEPT_INEXACT. There is a convenience wrapper
+///    _MM_GET_EXCEPTION_STATE().
+/// </li>
+/// <li>
+///    For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+///    _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+///    There is a convenience wrapper _MM_GET_EXCEPTION_MASK().
+/// </li>
+/// <li>
+///    For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+///    _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+///    _MM_GET_ROUNDING_MODE(x) where x is one of these macros.
+/// </li>
+/// <li>
+///    For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+///    There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().
+/// </li>
+/// <li>
+///    For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+///    _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+///    _MM_GET_DENORMALS_ZERO_MODE().
+/// </li>
+/// </ul>
+/// +/// For example, the expression below checks if an overflow exception has +/// occurred: +/// ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW ) +/// +/// The following example gets the current rounding mode: +/// _MM_GET_ROUNDING_MODE() +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSTMXCSR / STMXCSR instruction. +/// +/// \returns A 32-bit unsigned integer containing the contents of the MXCSR +/// register. +unsigned int _mm_getcsr(void); +/// \brief Sets the MXCSR register with the 32-bit unsigned integer value. +/// +/// There are several groups of macros associated with this intrinsic, +/// including: +///
+/// <ul>
+/// <li>
+///    For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+///    _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+///    _MM_EXCEPT_INEXACT. There is a convenience wrapper
+///    _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.
+/// </li>
+/// <li>
+///    For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+///    _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+///    There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one
+///    of these macros.
+/// </li>
+/// <li>
+///    For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+///    _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+///    _MM_SET_ROUNDING_MODE(x) where x is one of these macros.
+/// </li>
+/// <li>
+///    For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+///    There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is
+///    one of these macros.
+/// </li>
+/// <li>
+///    For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+///    _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+///    _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.
+/// </li>
+/// </ul>
+/// +/// For example, the following expression causes subsequent floating-point +/// operations to round up: +/// _mm_setcsr(_mm_getcsr() | _MM_ROUND_UP) +/// +/// The following example sets the DAZ and FTZ flags: +/// void setFlags() { +/// _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) +/// _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON) +/// } +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDMXCSR / LDMXCSR instruction. +/// +/// \param __i +/// A 32-bit unsigned integer value to be written to the MXCSR register. +void _mm_setcsr(unsigned int); + +#if defined(__cplusplus) +} // extern "C" +#endif + +/// \brief Selects 4 float values from the 128-bit operands of [4 x float], as +/// specified by the immediate value operand. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPS / SHUFPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. +/// \param mask +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \ a and \a b. \n +/// Bits [3:0] specify the values copied from operand \a a. \n +/// Bits [7:4] specify the values copied from operand \a b. \n +/// The destinations within the 128-bit destination are assigned values as +/// follows: \n +/// Bits [1:0] are used to assign values to bits [31:0] in the +/// destination. \n +/// Bits [3:2] are used to assign values to bits [63:32] in the +/// destination. \n +/// Bits [5:4] are used to assign values to bits [95:64] in the +/// destination. \n +/// Bits [7:6] are used to assign values to bits [127:96] in the +/// destination. \n +/// Bit value assignments: \n +/// 00: Bits [31:0] copied from the specified operand. \n +/// 01: Bits [63:32] copied from the specified operand. \n +/// 10: Bits [95:64] copied from the specified operand. \n +/// 11: Bits [127:96] copied from the specified operand. +/// \returns A 128-bit vector of [4 x float] containing the shuffled values. #define _mm_shuffle_ps(a, b, mask) __extension__ ({ \ - __m128 __a = (a); \ - __m128 __b = (b); \ - (__m128)__builtin_shufflevector((__v4sf)__a, (__v4sf)__b, \ - (mask) & 0x3, ((mask) & 0xc) >> 2, \ - (((mask) & 0x30) >> 4) + 4, \ - (((mask) & 0xc0) >> 6) + 4); }) + (__m128)__builtin_shufflevector((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \ + 0 + (((mask) >> 0) & 0x3), \ + 0 + (((mask) >> 2) & 0x3), \ + 4 + (((mask) >> 4) & 0x3), \ + 4 + (((mask) >> 6) & 0x3)); }) +/// \brief Unpacks the high-order (index 2,3) values from two 128-bit vectors of +/// [4 x float] and interleaves them into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPS / UNPCKHPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. \n +/// Bits [95:64] are written to bits [31:0] of the destination. \n +/// Bits [127:96] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// Bits [95:64] are written to bits [63:32] of the destination. \n +/// Bits [127:96] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the interleaved values. 
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_unpackhi_ps(__m128 __a, __m128 __b) { - return __builtin_shufflevector(__a, __b, 2, 6, 3, 7); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7); } +/// \brief Unpacks the low-order (index 0,1) values from two 128-bit vectors of +/// [4 x float] and interleaves them into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPS / UNPCKLPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. \n +/// Bits [31:0] are written to bits [31:0] of the destination. \n +/// Bits [63:32] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x float]. \n +/// Bits [31:0] are written to bits [63:32] of the destination. \n +/// Bits [63:32] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the interleaved values. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_unpacklo_ps(__m128 __a, __m128 __b) { - return __builtin_shufflevector(__a, __b, 0, 4, 1, 5); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5); } +/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 32 bits are set to the lower 32 bits of the second parameter. The upper +/// 96 bits are set to the upper 96 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The upper 96 bits are +/// written to the upper 96 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The lower 32 bits are +/// written to the lower 32 bits of the result. +/// \returns A 128-bit floating-point vector of [4 x float]. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_move_ss(__m128 __a, __m128 __b) { - return __builtin_shufflevector(__a, __b, 4, 1, 2, 3); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 4, 1, 2, 3); } +/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 64 bits are set to the upper 64 bits of the second parameter. The upper +/// 64 bits are set to the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD / UNPCKHPD instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are +/// written to the upper 64 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are +/// written to the lower 64 bits of the result. +/// \returns A 128-bit floating-point vector of [4 x float]. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_movehl_ps(__m128 __a, __m128 __b) { - return __builtin_shufflevector(__a, __b, 6, 7, 2, 3); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3); } +/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 64 bits are set to the lower 64 bits of the first parameter. The upper +/// 64 bits are set to the lower 64 bits of the second parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are +/// written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are +/// written to the upper 64 bits of the result. 
+/// \returns A 128-bit floating-point vector of [4 x float]. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_movelh_ps(__m128 __a, __m128 __b) { - return __builtin_shufflevector(__a, __b, 0, 1, 4, 5); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5); } +/// \brief Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x +/// float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE +/// instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16]. The elements of the destination are copied +/// from the corresponding elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpi16_ps(__m64 __a) { @@ -847,6 +2704,19 @@ _mm_cvtpi16_ps(__m64 __a) return __r; } +/// \brief Converts a 64-bit vector of 16-bit unsigned integer values into a +/// 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE +/// instruction. +/// +/// \param __a +/// A 64-bit vector of 16-bit unsigned integer values. The elements of the +/// destination are copied from the corresponding elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpu16_ps(__m64 __a) { @@ -864,11 +2734,24 @@ _mm_cvtpu16_ps(__m64 __a) return __r; } +/// \brief Converts the lower four 8-bit values from a 64-bit vector of [8 x i8] +/// into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE +/// instruction. +/// +/// \param __a +/// A 64-bit vector of [8 x i8]. The elements of the destination are copied +/// from the corresponding lower 4 elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpi8_ps(__m64 __a) { __m64 __b; - + __b = _mm_setzero_si64(); __b = _mm_cmpgt_pi8(__b, __a); __b = _mm_unpacklo_pi8(__a, __b); @@ -876,22 +2759,53 @@ _mm_cvtpi8_ps(__m64 __a) return _mm_cvtpi16_ps(__b); } +/// \brief Converts the lower four unsigned 8-bit integer values from a 64-bit +/// vector of [8 x u8] into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE +/// instruction. +/// +/// \param __a +/// A 64-bit vector of unsigned 8-bit integer values. The elements of the +/// destination are copied from the corresponding lower 4 elements in this +/// operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the source operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpu8_ps(__m64 __a) { __m64 __b; - + __b = _mm_setzero_si64(); __b = _mm_unpacklo_pi8(__a, __b); return _mm_cvtpi16_ps(__b); } +/// \brief Converts the two 32-bit signed integer values from each 64-bit vector +/// operand of [2 x i32] into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE +/// instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. The lower elements of the destination are +/// copied from the elements in this operand. +/// \param __b +/// A 64-bit vector of [2 x i32]. The upper elements of the destination are +/// copied from the elements in this operand. 
+/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// copied and converted values from the first operand. The upper 64 bits +/// contain the copied and converted values from the second operand. static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpi32x2_ps(__m64 __a, __m64 __b) { __m128 __c; - + __c = _mm_setzero_ps(); __c = _mm_cvtpi32_ps(__c, __b); __c = _mm_movelh_ps(__c, __c); @@ -899,35 +2813,87 @@ _mm_cvtpi32x2_ps(__m64 __a, __m64 __b) return _mm_cvtpi32_ps(__c, __a); } +/// \brief Converts each single-precision floating-point element of a 128-bit +/// floating-point vector of [4 x float] into a 16-bit signed integer, and +/// packs the results into a 64-bit integer vector of [4 x i16]. If the +/// floating-point element is NaN or infinity, or if the floating-point +/// element is greater than 0x7FFFFFFF or less than -0x8000, it is converted +/// to 0x8000. Otherwise if the floating-point element is greater than +/// 0x7FFF, it is converted to 0x7FFF. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI + \c COMPOSITE +/// instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 64-bit integer vector of [4 x i16] containing the converted +/// values. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtps_pi16(__m128 __a) { __m64 __b, __c; - + __b = _mm_cvtps_pi32(__a); __a = _mm_movehl_ps(__a, __a); __c = _mm_cvtps_pi32(__a); - + return _mm_packs_pi32(__b, __c); } +/// \brief Converts each single-precision floating-point element of a 128-bit +/// floating-point vector of [4 x float] into an 8-bit signed integer, and +/// packs the results into the lower 32 bits of a 64-bit integer vector of +/// [8 x i8]. The upper 32 bits of the vector are set to 0. If the +/// floating-point element is NaN or infinity, or if the floating-point +/// element is greater than 0x7FFFFFFF or less than -0x80, it is converted +/// to 0x80. Otherwise if the floating-point element is greater than 0x7F, +/// it is converted to 0x7F. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI + \c COMPOSITE +/// instruction. +/// +/// \param __a +/// 128-bit floating-point vector of [4 x float]. +/// \returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the +/// converted values and the uppper 32 bits are set to zero. static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtps_pi8(__m128 __a) { __m64 __b, __c; - + __b = _mm_cvtps_pi16(__a); __c = _mm_setzero_si64(); - + return _mm_packs_pi16(__b, __c); } +/// \brief Extracts the sign bits from each single-precision floating-point +/// element of a 128-bit floating-point vector of [4 x float] and returns the +/// sign bits in bits [0:3] of the result. Bits [31:4] of the result are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPS / MOVMSKPS instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each +/// single-precision floating-point element of the parameter. Bits [31:4] are +/// set to zero. 
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_ps(__m128 __a) { - return __builtin_ia32_movmskps(__a); + return __builtin_ia32_movmskps((__v4sf)__a); } + +#define _MM_ALIGN16 __attribute__((aligned(16))) + #define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #define _MM_EXCEPT_INVALID (0x0001) @@ -999,10 +2965,8 @@ do { \ #undef __DEFAULT_FN_ATTRS /* Ugly hack for backwards-compatibility (compatible with gcc) */ -#if defined(__SSE2__) && !__has_feature(modules) +#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics) #include #endif -#endif /* __SSE__ */ - #endif /* __XMMINTRIN_H */ diff --git a/c_headers/xopintrin.h b/c_headers/xopintrin.h index 2eb35c4be..bdf0cec32 100644 --- a/c_headers/xopintrin.h +++ b/c_headers/xopintrin.h @@ -28,14 +28,10 @@ #ifndef __XOPINTRIN_H #define __XOPINTRIN_H -#ifndef __XOP__ -# error "XOP instruction set is not enabled" -#else - #include /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop"))) static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C) @@ -202,13 +198,13 @@ _mm_hsubq_epi32(__m128i __A) static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C) { - return (__m128i)__builtin_ia32_vpcmov(__A, __B, __C); + return (__m128i)__builtin_ia32_vpcmov((__v2di)__A, (__v2di)__B, (__v2di)__C); } static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C) { - return (__m256i)__builtin_ia32_vpcmov_256(__A, __B, __C); + return (__m256i)__builtin_ia32_vpcmov_256((__v4di)__A, (__v4di)__B, (__v4di)__C); } static __inline__ __m128i __DEFAULT_FN_ATTRS @@ -242,20 +238,16 @@ _mm_rot_epi64(__m128i __A, __m128i __B) } #define _mm_roti_epi8(A, N) __extension__ ({ \ - __m128i __A = (A); \ - (__m128i)__builtin_ia32_vprotbi((__v16qi)__A, (N)); }) + (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)); }) #define _mm_roti_epi16(A, N) __extension__ ({ \ - __m128i __A = (A); \ - (__m128i)__builtin_ia32_vprotwi((__v8hi)__A, (N)); }) + (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)); }) #define _mm_roti_epi32(A, N) __extension__ ({ \ - __m128i __A = (A); \ - (__m128i)__builtin_ia32_vprotdi((__v4si)__A, (N)); }) + (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)); }) #define _mm_roti_epi64(A, N) __extension__ ({ \ - __m128i __A = (A); \ - (__m128i)__builtin_ia32_vprotqi((__v2di)__A, (N)); }) + (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)); }) static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_shl_epi8(__m128i __A, __m128i __B) @@ -306,44 +298,36 @@ _mm_sha_epi64(__m128i __A, __m128i __B) } #define _mm_com_epu8(A, B, N) __extension__ ({ \ - __m128i __A = (A); \ - __m128i __B = (B); \ - (__m128i)__builtin_ia32_vpcomub((__v16qi)__A, (__v16qi)__B, (N)); }) + (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (N)); }) #define _mm_com_epu16(A, B, N) __extension__ ({ \ - __m128i __A = (A); \ - __m128i __B = (B); \ - (__m128i)__builtin_ia32_vpcomuw((__v8hi)__A, (__v8hi)__B, (N)); }) + (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (N)); }) #define _mm_com_epu32(A, B, N) __extension__ ({ \ - __m128i __A = (A); \ - __m128i __B = (B); \ - (__m128i)__builtin_ia32_vpcomud((__v4si)__A, (__v4si)__B, (N)); }) + 
(__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (N)); }) #define _mm_com_epu64(A, B, N) __extension__ ({ \ - __m128i __A = (A); \ - __m128i __B = (B); \ - (__m128i)__builtin_ia32_vpcomuq((__v2di)__A, (__v2di)__B, (N)); }) + (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (N)); }) #define _mm_com_epi8(A, B, N) __extension__ ({ \ - __m128i __A = (A); \ - __m128i __B = (B); \ - (__m128i)__builtin_ia32_vpcomb((__v16qi)__A, (__v16qi)__B, (N)); }) + (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (N)); }) #define _mm_com_epi16(A, B, N) __extension__ ({ \ - __m128i __A = (A); \ - __m128i __B = (B); \ - (__m128i)__builtin_ia32_vpcomw((__v8hi)__A, (__v8hi)__B, (N)); }) + (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (N)); }) #define _mm_com_epi32(A, B, N) __extension__ ({ \ - __m128i __A = (A); \ - __m128i __B = (B); \ - (__m128i)__builtin_ia32_vpcomd((__v4si)__A, (__v4si)__B, (N)); }) + (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (N)); }) #define _mm_com_epi64(A, B, N) __extension__ ({ \ - __m128i __A = (A); \ - __m128i __B = (B); \ - (__m128i)__builtin_ia32_vpcomq((__v2di)__A, (__v2di)__B, (N)); }) + (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (N)); }) #define _MM_PCOMCTRL_LT 0 #define _MM_PCOMCTRL_LE 1 @@ -739,32 +723,23 @@ _mm_comtrue_epi64(__m128i __A, __m128i __B) } #define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \ - __m128d __X = (X); \ - __m128d __Y = (Y); \ - __m128i __C = (C); \ - (__m128d)__builtin_ia32_vpermil2pd((__v2df)__X, (__v2df)__Y, \ - (__v2di)__C, (I)); }) + (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__v2di)(__m128i)(C), (I)); }) #define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \ - __m256d __X = (X); \ - __m256d __Y = (Y); \ - __m256i __C = (C); \ - (__m256d)__builtin_ia32_vpermil2pd256((__v4df)__X, (__v4df)__Y, \ - (__v4di)__C, (I)); }) + (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (__v4di)(__m256i)(C), (I)); }) #define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \ - __m128 __X = (X); \ - __m128 __Y = (Y); \ - __m128i __C = (C); \ - (__m128)__builtin_ia32_vpermil2ps((__v4sf)__X, (__v4sf)__Y, \ - (__v4si)__C, (I)); }) + (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \ + (__v4si)(__m128i)(C), (I)); }) #define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \ - __m256 __X = (X); \ - __m256 __Y = (Y); \ - __m256i __C = (C); \ - (__m256)__builtin_ia32_vpermil2ps256((__v8sf)__X, (__v8sf)__Y, \ - (__v8si)__C, (I)); }) + (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (__v8si)(__m256i)(C), (I)); }) static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_frcz_ss(__m128 __A) @@ -804,6 +779,4 @@ _mm256_frcz_pd(__m256d __A) #undef __DEFAULT_FN_ATTRS -#endif /* __XOP__ */ - #endif /* __XOPINTRIN_H */ diff --git a/c_headers/xsavecintrin.h b/c_headers/xsavecintrin.h new file mode 100644 index 000000000..598470a68 --- /dev/null +++ b/c_headers/xsavecintrin.h @@ -0,0 +1,48 @@ +/*===---- xsavecintrin.h - XSAVEC intrinsic ------------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVECINTRIN_H +#define __XSAVECINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_xsavec(void *__p, unsigned long long __m) { + __builtin_ia32_xsavec(__p, __m); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS +_xsavec64(void *__p, unsigned long long __m) { + __builtin_ia32_xsavec64(__p, __m); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/xsaveintrin.h b/c_headers/xsaveintrin.h new file mode 100644 index 000000000..a2e6b2e74 --- /dev/null +++ b/c_headers/xsaveintrin.h @@ -0,0 +1,58 @@ +/*===---- xsaveintrin.h - XSAVE intrinsic ------------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVEINTRIN_H +#define __XSAVEINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_xsave(void *__p, unsigned long long __m) { + return __builtin_ia32_xsave(__p, __m); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xrstor(void *__p, unsigned long long __m) { + return __builtin_ia32_xrstor(__p, __m); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS +_xsave64(void *__p, unsigned long long __m) { + return __builtin_ia32_xsave64(__p, __m); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xrstor64(void *__p, unsigned long long __m) { + return __builtin_ia32_xrstor64(__p, __m); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/xsaveoptintrin.h b/c_headers/xsaveoptintrin.h new file mode 100644 index 000000000..d3faae78b --- /dev/null +++ b/c_headers/xsaveoptintrin.h @@ -0,0 +1,48 @@ +/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ------------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVEOPTINTRIN_H +#define __XSAVEOPTINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_xsaveopt(void *__p, unsigned long long __m) { + return __builtin_ia32_xsaveopt(__p, __m); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS +_xsaveopt64(void *__p, unsigned long long __m) { + return __builtin_ia32_xsaveopt64(__p, __m); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/c_headers/xsavesintrin.h b/c_headers/xsavesintrin.h new file mode 100644 index 000000000..c5e540a86 --- /dev/null +++ b/c_headers/xsavesintrin.h @@ -0,0 +1,58 @@ +/*===---- xsavesintrin.h - XSAVES intrinsic ------------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVESINTRIN_H +#define __XSAVESINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_xsaves(void *__p, unsigned long long __m) { + __builtin_ia32_xsaves(__p, __m); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xrstors(void *__p, unsigned long long __m) { + __builtin_ia32_xrstors(__p, __m); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS +_xrstors64(void *__p, unsigned long long __m) { + __builtin_ia32_xrstors64(__p, __m); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xsaves64(void *__p, unsigned long long __m) { + __builtin_ia32_xsaves64(__p, __m); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif
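For reference, the re-documented SSE pieces above (the _MM_GET_*/_MM_SET_* MXCSR convenience wrappers, _MM_SHUFFLE, the newly added _MM_ALIGN16 macro, and the rewritten _mm_shuffle_ps) can be exercised with a small caller along these lines. This is a minimal sketch, not part of the patch; demo.c and its values are hypothetical, and it assumes an x86 target built with SSE2 enabled.

/* demo.c (hypothetical) -- build with: cc -msse2 demo.c */
#include <xmmintrin.h>
#include <stdio.h>

int main(void) {
    /* MXCSR convenience wrappers documented for _mm_getcsr()/_mm_setcsr(). */
    _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
    _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

    __m128 a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f); /* lanes 0..3 = 0,1,2,3 */
    __m128 b = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f); /* lanes 0..3 = 4,5,6,7 */

    /* Low two result lanes come from a (indices 1,0), high two from b (2,3). */
    __m128 c = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 0, 1));

    _MM_ALIGN16 float out[4];
    _mm_store_ps(out, c);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 1 0 6 7 */
    printf("MXCSR = 0x%08x\n", _mm_getcsr());
    return 0;
}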
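The new xsave*intrin.h headers are reachable only through immintrin.h and tag their functions with __target__("xsave"), __target__("xsaveopt"), and so on, so callers must build with the matching -mxsave/-mxsaveopt/... flags. A minimal sketch of the basic save/restore pair follows; the helper is hypothetical, assumes the CPU reports XSAVE support and the OS has set CR4.OSXSAVE, and uses a fixed 4096-byte area rather than the exact size reported by CPUID leaf 0DH.

/* xsave_demo.c (hypothetical) -- build with: cc -mxsave xsave_demo.c */
#include <immintrin.h>   /* the xsave*intrin.h headers must not be included directly */
#include <string.h>

/* XSAVE areas must be 64-byte aligned; 4096 bytes comfortably covers the
   x87/SSE components used here. */
static char xsave_area[4096] __attribute__((aligned(64)));

void save_and_restore_fpsse(void) {
    const unsigned long long mask = 0x3;        /* bit 0 = x87, bit 1 = SSE */
    memset(xsave_area, 0, sizeof xsave_area);   /* header portion must start zeroed */
    _xsave(xsave_area, mask);                   /* save the requested state */
    _xrstor(xsave_area, mask);                  /* ...and restore it */
}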