#ifndef __IMMINTRIN_H
#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVX512VLINTRIN_H
#define __AVX512VLINTRIN_H

#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(256)))

#define _mm_cmpeq_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)

return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
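/* Usage sketch (illustrative, not normative): each _mm*_cmp{eq,ge,gt,le,lt,neq}_*
 * convenience macro above simply forwards to the generic _mm*_cmp_*_mask form
 * with the matching _MM_CMPINT_* predicate, yielding one mask bit per element:
 *
 *   __m128i a = _mm_set_epi32(1, 2, 3, 4);
 *   __m128i b = _mm_set_epi32(1, 0, 3, 0);
 *   __mmask8 k = _mm_cmpeq_epi32_mask(a, b);  // k == 0xA: elements 3 and 1 match
 */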
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m256i)((__v8su)__a & (__v8su)__b);
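/* Masking pattern (conceptual): the _mm*_mask_* and _mm*_maskz_* bodies wrap
 * the unmasked operation in a per-element select, roughly
 *
 *   dst[i] = ((__U >> i) & 1) ? op(__A, __B)[i] : __W[i];  // mask form
 *   dst[i] = ((__U >> i) & 1) ? op(__A, __B)[i] : 0;       // maskz form
 *
 * __builtin_ia32_select{d,q}_{128,256} performs that select over 32-bit (d)
 * or 64-bit (q) lanes of a 128- or 256-bit vector.
 */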
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)((__v4su)__a & (__v4su)__b);
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)(~(__v8su)__A & (__v8su)__B);
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)(~(__v4su)__A & (__v4su)__B);
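/* Note the operand order of the andnot forms: as with the SSE/AVX pandn
 * family, the FIRST operand is complemented, so _mm*_andnot_epi32(A, B)
 * computes (~A) & B per lane, e.g.
 *
 *   _mm_andnot_epi32(_mm_set1_epi32(0x0F), x)  // clears the low nibble of each lane
 */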
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)((__v8su)__a | (__v8su)__b);
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)((__v4su)__a | (__v4su)__b);
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)((__v8su)__a ^ (__v8su)__b);
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)((__v4su)__a ^ (__v4su)__b);
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)((__v4du)__a & (__v4du)__b);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)((__v2du)__a & (__v2du)__b);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)(~(__v4du)__A & (__v4du)__B);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)(~(__v2du)__A & (__v2du)__B);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)((__v4du)__a | (__v4du)__b);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)((__v2du)__a | (__v2du)__b);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)((__v4du)__a ^ (__v4du)__b);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)((__v2du)__a ^ (__v2du)__b);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,

#define _mm_cmp_epi32_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
                                        (__v4si)(__m128i)(b), (int)(p), \
                                        (__mmask8)-1)

#define _mm_mask_cmp_epi32_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
                                        (__v4si)(__m128i)(b), (int)(p), \
                                        (__mmask8)(m))

#define _mm_cmp_epu32_mask(a, b, p) \
  (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
                                         (__v4si)(__m128i)(b), (int)(p), \
                                         (__mmask8)-1)

#define _mm_mask_cmp_epu32_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
                                         (__v4si)(__m128i)(b), (int)(p), \
                                         (__mmask8)(m))

#define _mm256_cmp_epi32_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
                                        (__v8si)(__m256i)(b), (int)(p), \
                                        (__mmask8)-1)

#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
                                        (__v8si)(__m256i)(b), (int)(p), \
                                        (__mmask8)(m))

#define _mm256_cmp_epu32_mask(a, b, p) \
  (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
                                         (__v8si)(__m256i)(b), (int)(p), \
                                         (__mmask8)-1)

#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
                                         (__v8si)(__m256i)(b), (int)(p), \
                                         (__mmask8)(m))

#define _mm_cmp_epi64_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
                                        (__v2di)(__m128i)(b), (int)(p), \
                                        (__mmask8)-1)

#define _mm_mask_cmp_epi64_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
                                        (__v2di)(__m128i)(b), (int)(p), \
                                        (__mmask8)(m))

#define _mm_cmp_epu64_mask(a, b, p) \
  (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
                                         (__v2di)(__m128i)(b), (int)(p), \
                                         (__mmask8)-1)

#define _mm_mask_cmp_epu64_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
                                         (__v2di)(__m128i)(b), (int)(p), \
                                         (__mmask8)(m))

#define _mm256_cmp_epi64_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
                                        (__v4di)(__m256i)(b), (int)(p), \
                                        (__mmask8)-1)

#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
                                        (__v4di)(__m256i)(b), (int)(p), \
                                        (__mmask8)(m))

#define _mm256_cmp_epu64_mask(a, b, p) \
  (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
                                         (__v4di)(__m256i)(b), (int)(p), \
                                         (__mmask8)-1)

#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
                                         (__v4di)(__m256i)(b), (int)(p), \
                                         (__mmask8)(m))

#define _mm256_cmp_ps_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
                                         (__v8sf)(__m256)(b), (int)(p), \
                                         (__mmask8)-1)

#define _mm256_mask_cmp_ps_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
                                         (__v8sf)(__m256)(b), (int)(p), \
                                         (__mmask8)(m))

#define _mm256_cmp_pd_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
                                         (__v4df)(__m256d)(b), (int)(p), \
                                         (__mmask8)-1)

#define _mm256_mask_cmp_pd_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
                                         (__v4df)(__m256d)(b), (int)(p), \
                                         (__mmask8)(m))

#define _mm_cmp_ps_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
                                         (__v4sf)(__m128)(b), (int)(p), \
                                         (__mmask8)-1)

#define _mm_mask_cmp_ps_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
                                         (__v4sf)(__m128)(b), (int)(p), \
                                         (__mmask8)(m))

#define _mm_cmp_pd_mask(a, b, p) \
  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
                                         (__v2df)(__m128d)(b), (int)(p), \
                                         (__mmask8)-1)

#define _mm_mask_cmp_pd_mask(m, a, b, p) \
  (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
                                         (__v2df)(__m128d)(b), (int)(p), \
                                         (__mmask8)(m))

return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd (-(__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd (-(__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd (-(__v2df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps (-(__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps (-(__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps (-(__v4sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd ((__v2df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddsubps ((__v4sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
    __builtin_ia32_vfmaddpd ((__v2df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
    __builtin_ia32_vfmaddps ((__v4sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
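/* FMA masking conventions (sketch): the three masked fused-multiply variants
 * differ in which source survives in lanes where the mask bit is clear:
 *   _mm*_mask_fmadd_*  (A, U, B, C) keeps lanes of A,
 *   _mm*_mask3_fmadd_* (A, B, C, U) keeps lanes of C,
 *   _mm*_maskz_fmadd_* (U, A, B, C) zeroes the lane.
 * The fnmadd/fnmsub forms negate the product, which is why the bodies above
 * pass -(__v2df)__A (etc.) into the same __builtin_ia32_vfmadd* helper.
 */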
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
__builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
__builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
__builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
__builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
__builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
__builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
__builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
__builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
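/* Compress semantics (illustrative): the compress forms pack the elements
 * selected by the mask contiguously into the low lanes of the result, and
 * the compressstore forms write only those packed elements to memory, e.g.
 *
 *   __m128d v = _mm_set_pd(2.0, 1.0);           // v = {1.0, 2.0}
 *   __m128d r = _mm_maskz_compress_pd(0x2, v);  // r = {2.0, 0.0}
 */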
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
return (__m128d) __builtin_convertvector(
    __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
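/* _mm_cvtepu32_pd above: the shufflevector narrows __A to its low two
 * unsigned 32-bit elements and __builtin_convertvector widens them to
 * double. Because the source type is unsigned (__v4su), no sign extension
 * occurs: 0xFFFFFFFFu converts to 4294967295.0 rather than -1.0.
 */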
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
return (__m128)__builtin_convertvector((__v4su)__A, __v4sf);
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_convertvector((__v8su)__A, __v8sf);
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
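/* getexp extracts the unbiased exponent of each element, i.e.
 * floor(log2(|x|)), returned as a floating-point value in the same lane:
 * getexp(8.0) == 3.0 and getexp(0.75) == -1.0.
 */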
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
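/* 64-bit element absolute value (_mm_abs_epi64 and friends, vpabsq) first
 * appears with AVX-512VL; AVX2 only provided 8/16/32-bit pabs. The masked
 * forms again reuse the select wrappers below.
 */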
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
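/* Likewise, 64-bit signed/unsigned min and max (vpmaxsq, vpmaxuq, vpminsq,
 * vpminuq) are new with AVX-512VL, e.g.
 *
 *   __m128i m = _mm_max_epi64(_mm_set1_epi64x(-1), _mm_set1_epi64x(2));  // {2, 2}
 */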
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
#define _mm_roundscale_pd(A, imm) \
  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                              (int)(imm), \
                                              (__v2df)_mm_setzero_pd(), \
                                              (__mmask8)-1)

#define _mm_mask_roundscale_pd(W, U, A, imm) \
  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                              (int)(imm), \
                                              (__v2df)(__m128d)(W), \
                                              (__mmask8)(U))

#define _mm_maskz_roundscale_pd(U, A, imm) \
  (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                              (int)(imm), \
                                              (__v2df)_mm_setzero_pd(), \
                                              (__mmask8)(U))

#define _mm256_roundscale_pd(A, imm) \
  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                              (int)(imm), \
                                              (__v4df)_mm256_setzero_pd(), \
                                              (__mmask8)-1)

#define _mm256_mask_roundscale_pd(W, U, A, imm) \
  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                              (int)(imm), \
                                              (__v4df)(__m256d)(W), \
                                              (__mmask8)(U))

#define _mm256_maskz_roundscale_pd(U, A, imm) \
  (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                              (int)(imm), \
                                              (__v4df)_mm256_setzero_pd(), \
                                              (__mmask8)(U))

#define _mm_roundscale_ps(A, imm) \
  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                             (__v4sf)_mm_setzero_ps(), \
                                             (__mmask8)-1)

#define _mm_mask_roundscale_ps(W, U, A, imm) \
  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                             (__v4sf)(__m128)(W), \
                                             (__mmask8)(U))

#define _mm_maskz_roundscale_ps(U, A, imm) \
  (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                             (__v4sf)_mm_setzero_ps(), \
                                             (__mmask8)(U))

#define _mm256_roundscale_ps(A, imm) \
  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                             (__v8sf)_mm256_setzero_ps(), \
                                             (__mmask8)-1)

#define _mm256_mask_roundscale_ps(W, U, A, imm) \
  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                             (__v8sf)(__m256)(W), \
                                             (__mmask8)(U))

#define _mm256_maskz_roundscale_ps(U, A, imm) \
  (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                             (__v8sf)_mm256_setzero_ps(), \
                                             (__mmask8)(U))

return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
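/* roundscale rounds each element to 2^-M precision: imm[7:4] selects M and
 * imm[1:0] the rounding direction (imm[2] set defers to MXCSR). For example,
 * _mm_roundscale_pd(x, 0x01) with M == 0 rounds each double toward negative
 * infinity.
 */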
return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
#define _mm_i64scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_i64scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm256_i64scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_i64scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm_i64scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_i64scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_i64scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm256_i64scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_i32scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_i32scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm256_i32scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_i32scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm_i32scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_i32scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_i32scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)-1, \
                               (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
                               (int)(scale))

#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)(mask), \
                               (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
                               (int)(scale))

#define _mm256_i32scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)-1, \
                               (__v8si)(__m256i)(index), \
                               (__v8si)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)(mask), \
                               (__v8si)(__m256i)(index), \
                               (__v8si)(__m256i)(v1), (int)(scale))

return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
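/* Scatter addressing (illustrative): each element v1[i] whose mask bit is
 * set is stored at (char *)addr + index[i] * scale, where scale must be
 * 1, 2, 4, or 8; the unmasked macros pass (__mmask8)-1 to store every
 * element, e.g.
 *
 *   _mm_i64scatter_pd(buf, idx, v, 8);  // buf[idx[0]] = v[0]; buf[idx[1]] = v[1];
 */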
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I,
return (__m128i)__builtin_ia32_selectd_128(__U,
return (__m128i)__builtin_ia32_selectd_128(__U,
return (__m128i)__builtin_ia32_selectd_128(__U,
return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I,
return (__m256i)__builtin_ia32_selectd_256(__U,
return (__m256i)__builtin_ia32_selectd_256(__U,
return (__m256i)__builtin_ia32_selectd_256(__U,
return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I,
return (__m128d)__builtin_ia32_selectpd_128(__U,
return (__m128d)__builtin_ia32_selectpd_128(__U,
    (__v2df)(__m128d)__I);
return (__m128d)__builtin_ia32_selectpd_128(__U,
return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I,
return (__m256d)__builtin_ia32_selectpd_256(__U,
return (__m256d)__builtin_ia32_selectpd_256(__U,
    (__v4df)(__m256d)__I);
return (__m256d)__builtin_ia32_selectpd_256(__U,
return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I,
return (__m128)__builtin_ia32_selectps_128(__U,
return (__m128)__builtin_ia32_selectps_128(__U,
    (__v4sf)(__m128)__I);
return (__m128)__builtin_ia32_selectps_128(__U,
return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I,
return (__m256)__builtin_ia32_selectps_256(__U,
return (__m256)__builtin_ia32_selectps_256(__U,
    (__v8sf)(__m256)__I);
return (__m256)__builtin_ia32_selectps_256(__U,
return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I,
return (__m128i)__builtin_ia32_selectq_128(__U,
return (__m128i)__builtin_ia32_selectq_128(__U,
return (__m128i)__builtin_ia32_selectq_128(__U,
return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I,
return (__m256i)__builtin_ia32_selectq_256(__U,
return (__m256i)__builtin_ia32_selectq_256(__U,
return (__m256i)__builtin_ia32_selectq_256(__U,
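/* permutex2var gathers from a two-register table (conceptual sketch): for
 * the 4 x 32-bit forms, bits 1:0 of each index pick the element and bit 2
 * picks the source (0 = first operand, 1 = second). The _mm*_mask2_*
 * variants, whose select tails above pass __I as the fallback, preserve the
 * index register in lanes where the mask bit is clear.
 */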
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
#define _mm_rol_epi32(a, b) \
  (__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b))

#define _mm_mask_rol_epi32(w, u, a, b) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                      (__v4si)_mm_rol_epi32((a), (b)), \
                                      (__v4si)(__m128i)(w))

#define _mm_maskz_rol_epi32(u, a, b) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                      (__v4si)_mm_rol_epi32((a), (b)), \
                                      (__v4si)_mm_setzero_si128())

#define _mm256_rol_epi32(a, b) \
  (__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b))

#define _mm256_mask_rol_epi32(w, u, a, b) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                      (__v8si)_mm256_rol_epi32((a), (b)), \
                                      (__v8si)(__m256i)(w))

#define _mm256_maskz_rol_epi32(u, a, b) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                      (__v8si)_mm256_rol_epi32((a), (b)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm_rol_epi64(a, b) \
  (__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b))

#define _mm_mask_rol_epi64(w, u, a, b) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                      (__v2di)_mm_rol_epi64((a), (b)), \
                                      (__v2di)(__m128i)(w))

#define _mm_maskz_rol_epi64(u, a, b) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                      (__v2di)_mm_rol_epi64((a), (b)), \
                                      (__v2di)_mm_setzero_si128())

#define _mm256_rol_epi64(a, b) \
  (__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b))

#define _mm256_mask_rol_epi64(w, u, a, b) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                      (__v4di)_mm256_rol_epi64((a), (b)), \
                                      (__v4di)(__m256i)(w))

#define _mm256_maskz_rol_epi64(u, a, b) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                      (__v4di)_mm256_rol_epi64((a), (b)), \
                                      (__v4di)_mm256_setzero_si256())

return (__m128i)__builtin_ia32_prolvd128((__v4si)__A, (__v4si)__B);
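/* Rotates come in immediate (rol/ror, count encoded in the instruction) and
 * variable (rolv/rorv, per-lane count vector) forms; counts are taken
 * modulo the lane width, e.g.
 *
 *   _mm_rol_epi32(_mm_set1_epi32(0x80000001), 1)  // each lane -> 0x00000003
 */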
return (__m128i)__builtin_ia32_selectd_128(__U,
return (__m128i)__builtin_ia32_selectd_128(__U,
return (__m256i)__builtin_ia32_prolvd256((__v8si)__A, (__v8si)__B);
return (__m256i)__builtin_ia32_selectd_256(__U,
return (__m256i)__builtin_ia32_selectd_256(__U,
return (__m128i)__builtin_ia32_prolvq128((__v2di)__A, (__v2di)__B);
return (__m128i)__builtin_ia32_selectq_128(__U,
return (__m128i)__builtin_ia32_selectq_128(__U,
return (__m256i)__builtin_ia32_prolvq256((__v4di)__A, (__v4di)__B);
return (__m256i)__builtin_ia32_selectq_256(__U,
return (__m256i)__builtin_ia32_selectq_256(__U,
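/* Usage sketch for the rotate intrinsics above: _mm_rol_epi32 rotates every
 * lane left by an immediate, _mm_rolv_epi32 by a per-lane count (values here
 * are illustrative):
 *
 *   #include <immintrin.h>
 *
 *   static inline void rotate_demo(void) {
 *     __m128i v = _mm_set1_epi32(0x80000001);
 *     __m128i r1 = _mm_rol_epi32(v, 1);         // each lane -> 0x00000003
 *     __m128i counts = _mm_setr_epi32(0, 1, 2, 3);
 *     __m128i r2 = _mm_rolv_epi32(v, counts);   // lane i rotated left by i
 *     (void)r1; (void)r2;
 *   }
 */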
#define _mm_ror_epi32(a, b) \
  (__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b))

#define _mm_mask_ror_epi32(w, u, a, b) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                      (__v4si)_mm_ror_epi32((a), (b)), \
                                      (__v4si)(__m128i)(w))

#define _mm_maskz_ror_epi32(u, a, b) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                      (__v4si)_mm_ror_epi32((a), (b)), \
                                      (__v4si)_mm_setzero_si128())

#define _mm256_ror_epi32(a, b) \
  (__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b))

#define _mm256_mask_ror_epi32(w, u, a, b) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                      (__v8si)_mm256_ror_epi32((a), (b)), \
                                      (__v8si)(__m256i)(w))

#define _mm256_maskz_ror_epi32(u, a, b) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                      (__v8si)_mm256_ror_epi32((a), (b)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm_ror_epi64(a, b) \
  (__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b))

#define _mm_mask_ror_epi64(w, u, a, b) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                      (__v2di)_mm_ror_epi64((a), (b)), \
                                      (__v2di)(__m128i)(w))

#define _mm_maskz_ror_epi64(u, a, b) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                      (__v2di)_mm_ror_epi64((a), (b)), \
                                      (__v2di)_mm_setzero_si128())

#define _mm256_ror_epi64(a, b) \
  (__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b))

#define _mm256_mask_ror_epi64(w, u, a, b) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                      (__v4di)_mm256_ror_epi64((a), (b)), \
                                      (__v4di)(__m256i)(w))

#define _mm256_maskz_ror_epi64(u, a, b) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                      (__v4di)_mm256_ror_epi64((a), (b)), \
                                      (__v4di)_mm256_setzero_si256())

return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_prorvd128((__v4si)__A, (__v4si)__B);
return (__m128i)__builtin_ia32_selectd_128(__U,
return (__m128i)__builtin_ia32_selectd_128(__U,
return (__m256i)__builtin_ia32_prorvd256((__v8si)__A, (__v8si)__B);
return (__m256i)__builtin_ia32_selectd_256(__U,
return (__m256i)__builtin_ia32_selectd_256(__U,
return (__m128i)__builtin_ia32_prorvq128((__v2di)__A, (__v2di)__B);
return (__m128i)__builtin_ia32_selectq_128(__U,
return (__m128i)__builtin_ia32_selectq_128(__U,
return (__m256i)__builtin_ia32_prorvq256((__v4di)__A, (__v4di)__B);
return (__m256i)__builtin_ia32_selectq_256(__U,
return (__m256i)__builtin_ia32_selectq_256(__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di) __Y);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
return *(__m128i *) __P;
return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
return *(__m256i *) __P;
return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
*(__m128i *) __P = __A;
__builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
*(__m256i *) __P = __A;
__builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
return *(__m128i *) __P;
return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
return *(__m256i *) __P;
return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
*(__m128i *) __P = __A;
__builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
*(__m256i *) __P = __A;
__builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
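/* The movdqa32/movdqa64 builtins above back the _mm*_mask_load_epi32/64 and
 * _mm*_mask_store_epi32/64 intrinsics: aligned accesses where masked-off
 * lanes keep the pass-through value (or are zeroed for maskz) and, on real
 * hardware, do not fault.  Sketch (pointer must be 16-byte aligned):
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128i load_first_two(const void *p) {
 *     // load lanes 0-1 from memory, zero lanes 2-3
 *     return _mm_maskz_load_epi32((__mmask8)0x3, p);
 *   }
 */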
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128(__M,
return (__m128i)__builtin_ia32_selectd_128(__M,
return (__m256i)__builtin_ia32_selectd_256(__M,
return (__m256i)__builtin_ia32_selectd_256(__M,
return (__m128i) __builtin_ia32_selectq_128(__M,
return (__m128i) __builtin_ia32_selectq_128(__M,
return (__m256i) __builtin_ia32_selectq_256(__M,
return (__m256i) __builtin_ia32_selectq_256(__M,
#define _mm_fixupimm_pd(A, B, C, imm) \
  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
                                             (__v2df)(__m128d)(B), \
                                             (__v2di)(__m128i)(C), (int)(imm), \
                                             (__mmask8)-1)

#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
  (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
                                             (__v2df)(__m128d)(B), \
                                             (__v2di)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
  (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
                                              (__v2df)(__m128d)(B), \
                                              (__v2di)(__m128i)(C), \
                                              (int)(imm), (__mmask8)(U))

#define _mm256_fixupimm_pd(A, B, C, imm) \
  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
                                             (__v4df)(__m256d)(B), \
                                             (__v4di)(__m256i)(C), (int)(imm), \
                                             (__mmask8)-1)

#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
  (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
                                             (__v4df)(__m256d)(B), \
                                             (__v4di)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
  (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
                                              (__v4df)(__m256d)(B), \
                                              (__v4di)(__m256i)(C), \
                                              (int)(imm), (__mmask8)(U))

#define _mm_fixupimm_ps(A, B, C, imm) \
  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                            (__v4sf)(__m128)(B), \
                                            (__v4si)(__m128i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
  (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                            (__v4sf)(__m128)(B), \
                                            (__v4si)(__m128i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
  (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
                                             (__v4sf)(__m128)(B), \
                                             (__v4si)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_fixupimm_ps(A, B, C, imm) \
  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                            (__v8sf)(__m256)(B), \
                                            (__v8si)(__m256i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
  (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                            (__v8sf)(__m256)(B), \
                                            (__v8si)(__m256i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
  (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
                                             (__v8sf)(__m256)(B), \
                                             (__v8si)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U))

return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
struct __loadu_epi64 {
  __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi64*)__P)->__v;
return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
struct __loadu_epi64 {
  __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi64*)__P)->__v;
return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
struct __loadu_epi32 {
  __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi32*)__P)->__v;
return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
struct __loadu_epi32 {
  __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
return ((struct __loadu_epi32*)__P)->__v;
return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
__builtin_ia32_storeapd128_mask ((__v2df *) __P,
__builtin_ia32_storeapd256_mask ((__v4df *) __P,
__builtin_ia32_storeaps128_mask ((__v4sf *) __P,
__builtin_ia32_storeaps256_mask ((__v8sf *) __P,
struct __storeu_epi64 {
  __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi64*)__P)->__v = __A;
__builtin_ia32_storedqudi128_mask ((__v2di *) __P,
struct __storeu_epi64 {
  __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi64*)__P)->__v = __A;
__builtin_ia32_storedqudi256_mask ((__v4di *) __P,
struct __storeu_epi32 {
  __m128i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi32*)__P)->__v = __A;
__builtin_ia32_storedqusi128_mask ((__v4si *) __P,
struct __storeu_epi32 {
  __m256i_u __v;
} __attribute__((__packed__, __may_alias__));
((struct __storeu_epi32*)__P)->__v = __A;
__builtin_ia32_storedqusi256_mask ((__v8si *) __P,
__builtin_ia32_storeupd128_mask ((__v2df *) __P,
__builtin_ia32_storeupd256_mask ((__v4df *) __P,
__builtin_ia32_storeups128_mask ((__v4sf *) __P,
__builtin_ia32_storeups256_mask ((__v8sf *) __P,
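/* The one-member __loadu_/__storeu_ structs above are the usual header idiom
 * for unaligned access: __packed__ forces alignment 1 and __may_alias__
 * exempts the access from strict-aliasing rules, so a plain member read or
 * write compiles to an unaligned vector move.  An equivalent user-level
 * sketch (the function name is illustrative, not part of this header):
 *
 *   #include <string.h>
 *   #include <immintrin.h>
 *
 *   static inline __m128i loadu_epi64_sketch(const void *p) {
 *     __m128i v;
 *     memcpy(&v, p, sizeof(v));  // the portable spelling of the same load
 *     return v;
 *   }
 */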
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
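/* rcp14 returns a reciprocal approximation with relative error below 2^-14.
 * One Newton-Raphson step, x1 = x0 * (2 - a*x0), roughly doubles the number
 * of accurate bits.  A hedged sketch:
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128 fast_recip(__m128 a) {
 *     __m128 x0 = _mm_rcp14_ps(a);
 *     // refine: x1 = x0 * (2 - a*x0)
 *     __m128 t = _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(a, x0));
 *     return _mm_mul_ps(x0, t);
 *   }
 */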
#define _mm_mask_permute_pd(W, U, X, C) \
  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                       (__v2df)_mm_permute_pd((X), (C)), \
                                       (__v2df)(__m128d)(W))

#define _mm_maskz_permute_pd(U, X, C) \
  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                       (__v2df)_mm_permute_pd((X), (C)), \
                                       (__v2df)_mm_setzero_pd())

#define _mm256_mask_permute_pd(W, U, X, C) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_permute_pd((X), (C)), \
                                       (__v4df)(__m256d)(W))

#define _mm256_maskz_permute_pd(U, X, C) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_permute_pd((X), (C)), \
                                       (__v4df)_mm256_setzero_pd())

#define _mm_mask_permute_ps(W, U, X, C) \
  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                      (__v4sf)_mm_permute_ps((X), (C)), \
                                      (__v4sf)(__m128)(W))

#define _mm_maskz_permute_ps(U, X, C) \
  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                      (__v4sf)_mm_permute_ps((X), (C)), \
                                      (__v4sf)_mm_setzero_ps())

#define _mm256_mask_permute_ps(W, U, X, C) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_permute_ps((X), (C)), \
                                      (__v8sf)(__m256)(W))

#define _mm256_maskz_permute_ps(U, X, C) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_permute_ps((X), (C)), \
                                      (__v8sf)_mm256_setzero_ps())

return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_psraq256((__v4di) __A, (__v2di) __B);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
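/* AVX-512VL adds the 64-bit arithmetic right shift (vpsraq) that SSE/AVX2
 * lacked: the psraq/psraqi builtins above back _mm*_sra_epi64 (count in the
 * low 64 bits of an __m128i) and _mm*_srai_epi64 (immediate count).  Sketch:
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128i sign_of_epi64(__m128i v) {
 *     // arithmetic shift by 63 gives 0 for non-negative, -1 for negative
 *     return _mm_srai_epi64(v, 63);
 *   }
 */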
#define _mm_ternarylogic_epi32(A, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
                                            (__v4si)(__m128i)(B), \
                                            (__v4si)(__m128i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
                                            (__v4si)(__m128i)(B), \
                                            (__v4si)(__m128i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
                                             (__v4si)(__m128i)(B), \
                                             (__v4si)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_ternarylogic_epi32(A, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
                                            (__v8si)(__m256i)(B), \
                                            (__v8si)(__m256i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
                                            (__v8si)(__m256i)(B), \
                                            (__v8si)(__m256i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
                                             (__v8si)(__m256i)(B), \
                                             (__v8si)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm_ternarylogic_epi64(A, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
                                            (__v2di)(__m128i)(B), \
                                            (__v2di)(__m128i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
                                            (__v2di)(__m128i)(B), \
                                            (__v2di)(__m128i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
  (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
                                             (__v2di)(__m128i)(B), \
                                             (__v2di)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_ternarylogic_epi64(A, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
                                            (__v4di)(__m256i)(B), \
                                            (__v4di)(__m256i)(C), (int)(imm), \
                                            (__mmask8)-1)

#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
                                            (__v4di)(__m256i)(B), \
                                            (__v4di)(__m256i)(C), (int)(imm), \
                                            (__mmask8)(U))

#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
  (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
                                             (__v4di)(__m256i)(B), \
                                             (__v4di)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U))

#define _mm256_shuffle_f32x4(A, B, imm) \
  (__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
                                        (__v8sf)(__m256)(B), (int)(imm))

#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                      (__v8sf)(__m256)(W))

#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                      (__v8sf)_mm256_setzero_ps())

#define _mm256_shuffle_f64x2(A, B, imm) \
  (__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
                                         (__v4df)(__m256d)(B), (int)(imm))

#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                       (__v4df)(__m256d)(W))

#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                       (__v4df)_mm256_setzero_pd())
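/* vpternlog (the ternarylogic macros above) treats imm8 as the truth table
 * of an arbitrary 3-input boolean function: bit (a<<2 | b<<1 | c) of the
 * immediate is the result for input bits (a, b, c).  For example 0xCA
 * encodes A ? B : C, i.e. a bitwise select.  Sketch:
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128i bitwise_select(__m128i mask, __m128i b, __m128i c) {
 *     // per bit: mask ? b : c; truth table 0xCA = 0b11001010
 *     return _mm_ternarylogic_epi32(mask, b, c, 0xCA);
 *   }
 */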
#define _mm256_shuffle_i32x4(A, B, imm) \
  (__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
                                         (__v8si)(__m256i)(B), (int)(imm))

#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                      (__v8si)(__m256i)(W))

#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm256_shuffle_i64x2(A, B, imm) \
  (__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
                                         (__v4di)(__m256i)(B), (int)(imm))

#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                      (__v4di)(__m256i)(W))

#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                      (__v4di)_mm256_setzero_si256())

#define _mm_mask_shuffle_pd(W, U, A, B, M) \
  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
                                       (__v2df)(__m128d)(W))

#define _mm_maskz_shuffle_pd(U, A, B, M) \
  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
                                       (__v2df)_mm_setzero_pd())

#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
                                       (__v4df)(__m256d)(W))

#define _mm256_maskz_shuffle_pd(U, A, B, M) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
                                       (__v4df)_mm256_setzero_pd())

#define _mm_mask_shuffle_ps(W, U, A, B, M) \
  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
                                      (__v4sf)(__m128)(W))

#define _mm_maskz_shuffle_ps(U, A, B, M) \
  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
                                      (__v4sf)_mm_setzero_ps())

#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
                                      (__v8sf)(__m256)(W))

#define _mm256_maskz_shuffle_ps(U, A, B, M) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
                                      (__v8sf)_mm256_setzero_ps())

return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
                                       0, 1, 2, 3, 0, 1, 2, 3);
return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
                                        0, 1, 2, 3, 0, 1, 2, 3);
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
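/* _mm256_broadcast_f32x4 / _mm256_broadcast_i32x4 (the shufflevectors above)
 * replicate one 128-bit block into both halves of a 256-bit vector.  Sketch:
 *
 *   #include <immintrin.h>
 *
 *   static inline __m256 splat_quad(__m128 q) {
 *     return _mm256_broadcast_f32x4(q);  // result = { q, q }
 *   }
 */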
return (__m256d)__builtin_ia32_selectpd_256(__M,
return (__m256d)__builtin_ia32_selectpd_256(__M,
return (__m128)__builtin_ia32_selectps_128(__M,
return (__m128)__builtin_ia32_selectps_128(__M,
return (__m256)__builtin_ia32_selectps_256(__M,
return (__m256)__builtin_ia32_selectps_256(__M,
return (__m128i)__builtin_ia32_selectd_128(__M,
return (__m128i)__builtin_ia32_selectd_128(__M,
return (__m256i)__builtin_ia32_selectd_256(__M,
return (__m256i)__builtin_ia32_selectd_256(__M,
return (__m128i)__builtin_ia32_selectq_128(__M,
return (__m128i)__builtin_ia32_selectq_128(__M,
return (__m256i)__builtin_ia32_selectq_256(__M,
return (__m256i)__builtin_ia32_selectq_256(__M,
return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
                                                 (__v16qi) __O, __M);
return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
__builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
                                                 (__v16qi) __O, __M);
return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
__builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
__builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
__builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
                                                 (__v16qi) __O, __M);
return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
__builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
                                                 (__v16qi) __O, __M);
return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
__builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
__builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
__builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
__builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
__builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
__builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
__builtin_ia32_pmovusdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
__builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
__builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
__builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
__builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
__builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
__builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
__builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
__builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
return (__m128i)__builtin_shufflevector(
    __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
    2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
                                                (__v16qi) __O, __M);
return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
__builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
return (__m128i)__builtin_shufflevector(
    __builtin_convertvector((__v8si)__A, __v8qi),
    (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
    12, 13, 14, 15);
return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
                                                (__v16qi) __O, __M);
return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
__builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
return (__m128i)__builtin_shufflevector(
    __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
    2, 3, 4, 5, 6, 7);
return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
__builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
__builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
return (__m128i)__builtin_shufflevector(
    __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
                                                (__v16qi) __O, __M);
return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
__builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
return (__m128i)__builtin_shufflevector(
    __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
    2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
                                                (__v16qi) __O, __M);
return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
__builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
return (__m128i)__builtin_shufflevector(
    __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3);
return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
__builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
__builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
return (__m128i)__builtin_shufflevector(
    __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3,
    3, 3, 3, 3);
return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
__builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
return (__m128i)__builtin_shufflevector(
    __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
    2, 3, 4, 5, 6, 7);
return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
__builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
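/* The pmov/pmovs/pmovus builtins above implement three flavours of
 * narrowing: plain truncation (_mm256_cvtepi64_epi32), signed saturation
 * (_mm256_cvtsepi64_epi32) and unsigned saturation (_mm256_cvtusepi64_epi32).
 * Sketch of the difference on one value:
 *
 *   #include <immintrin.h>
 *
 *   static inline void narrow_demo(void) {
 *     __m256i v = _mm256_set1_epi64x(0x100000000LL);  // 2^32
 *     __m128i t = _mm256_cvtepi64_epi32(v);   // truncated -> 0x00000000
 *     __m128i s = _mm256_cvtsepi64_epi32(v);  // saturated -> 0x7fffffff
 *     (void)t; (void)s;
 *   }
 */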
#define _mm256_extractf32x4_ps(A, imm) \
  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                               (int)(imm), \
                                               (__v4sf)_mm_undefined_ps(), \
                                               (__mmask8)-1)

#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                               (int)(imm), \
                                               (__v4sf)(__m128)(W), \
                                               (__mmask8)(U))

#define _mm256_maskz_extractf32x4_ps(U, A, imm) \
  (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                               (int)(imm), \
                                               (__v4sf)_mm_setzero_ps(), \
                                               (__mmask8)(U))

#define _mm256_extracti32x4_epi32(A, imm) \
  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                (int)(imm), \
                                                (__v4si)_mm_undefined_si128(), \
                                                (__mmask8)-1)

#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                (int)(imm), \
                                                (__v4si)(__m128i)(W), \
                                                (__mmask8)(U))

#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
  (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                (int)(imm), \
                                                (__v4si)_mm_setzero_si128(), \
                                                (__mmask8)(U))

#define _mm256_insertf32x4(A, B, imm) \
  (__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
                                         (__v4sf)(__m128)(B), (int)(imm))

#define _mm256_mask_insertf32x4(W, U, A, B, imm) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
                                      (__v8sf)(__m256)(W))

#define _mm256_maskz_insertf32x4(U, A, B, imm) \
  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                      (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
                                      (__v8sf)_mm256_setzero_ps())

#define _mm256_inserti32x4(A, B, imm) \
  (__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
                                          (__v4si)(__m128i)(B), (int)(imm))

#define _mm256_mask_inserti32x4(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
                                      (__v8si)(__m256i)(W))

#define _mm256_maskz_inserti32x4(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
                                      (__v8si)_mm256_setzero_si256())
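/* extractf32x4/inserti32x4 move whole 128-bit lanes; imm selects which half
 * of the 256-bit operand.  Sketch:
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128 upper_half(__m256 v) {
 *     return _mm256_extractf32x4_ps(v, 1);  // lanes 4..7
 *   }
 */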
#define _mm_getmant_pd(A, B, C) \
  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1)

#define _mm_mask_getmant_pd(W, U, A, B, C) \
  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U))

#define _mm_maskz_getmant_pd(U, A, B, C) \
  (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U))

#define _mm256_getmant_pd(A, B, C) \
  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4df)_mm256_setzero_pd(), \
                                            (__mmask8)-1)

#define _mm256_mask_getmant_pd(W, U, A, B, C) \
  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4df)(__m256d)(W), \
                                            (__mmask8)(U))

#define _mm256_maskz_getmant_pd(U, A, B, C) \
  (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4df)_mm256_setzero_pd(), \
                                            (__mmask8)(U))

#define _mm_getmant_ps(A, B, C) \
  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1)

#define _mm_mask_getmant_ps(W, U, A, B, C) \
  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v4sf)(__m128)(W), \
                                           (__mmask8)(U))

#define _mm_maskz_getmant_ps(U, A, B, C) \
  (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U))

#define _mm256_getmant_ps(A, B, C) \
  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v8sf)_mm256_setzero_ps(), \
                                           (__mmask8)-1)

#define _mm256_mask_getmant_ps(W, U, A, B, C) \
  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v8sf)(__m256)(W), \
                                           (__mmask8)(U))

#define _mm256_maskz_getmant_ps(U, A, B, C) \
  (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                           (int)(((C)<<2) | (B)), \
                                           (__v8sf)_mm256_setzero_ps(), \
                                           (__mmask8)(U))
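/* getmant decomposes x into 2^k * mantissa and returns the mantissa
 * normalized into the interval selected by B, with the sign handling
 * selected by C; the builtin packs them as (C<<2)|B.  Sketch (the
 * _MM_MANT_* enumerators come from the AVX-512F header):
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128d mantissa_1_2(__m128d x) {
 *     // |mantissa| normalized into [1, 2), sign forced to zero
 *     return _mm_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_zero);
 *   }
 */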
#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
  (__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
                                        (void const *)(addr), \
                                        (__v2di)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
                                        (void const *)(addr), \
                                        (__v2di)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
  (__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4di)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
  (__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4di)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
  (__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
                                       (void const *)(addr), \
                                       (__v2di)(__m128i)(index), \
                                       (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
                                        (void const *)(addr), \
                                        (__v2di)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
  (__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
                                       (void const *)(addr), \
                                       (__v4di)(__m256i)(index), \
                                       (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4di)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
  (__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
  (__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
  (__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
  (__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
                                       (void const *)(addr), \
                                       (__v4si)(__m128i)(index), \
                                       (__mmask8)(mask), (int)(scale))

#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
  (__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
  (__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
                                       (void const *)(addr), \
                                       (__v8si)(__m256i)(index), \
                                       (__mmask8)(mask), (int)(scale))

#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
  (__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
                                        (void const *)(addr), \
                                        (__v8si)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale))

#define _mm256_permutex_pd(X, C) \
  (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C))

#define _mm256_mask_permutex_pd(W, U, X, C) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_permutex_pd((X), (C)), \
                                       (__v4df)(__m256d)(W))

#define _mm256_maskz_permutex_pd(U, X, C) \
  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                       (__v4df)_mm256_permutex_pd((X), (C)), \
                                       (__v4df)_mm256_setzero_pd())

#define _mm256_permutex_epi64(X, C) \
  (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))

#define _mm256_mask_permutex_epi64(W, U, X, C) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
                                      (__v4di)(__m256i)(W))

#define _mm256_maskz_permutex_epi64(U, X, C) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_permutex_epi64((X), (C)), \
                                      (__v4di)_mm256_setzero_si256())
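/* The mmask gather macros above load scattered elements: result lane i is
 * read from addr + index[i]*scale when mask bit i is set, and taken from
 * v1_old otherwise.  Sketch (`table` is an illustrative name):
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128 gather4(const float *table, __m128i idx) {
 *     // gather all four lanes; scale 4 = sizeof(float)
 *     return _mm_mmask_i32gather_ps(_mm_setzero_ps(), (__mmask8)0xf, idx,
 *                                   table, 4);
 *   }
 */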
return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X);
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
return (__m256i)__builtin_ia32_permvardi256((__v4di) __Y, (__v4di) __X);
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,

#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))

return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,

#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A))

return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
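/* permutexvar is a full cross-lane permute driven by an index vector; note
 * the argument swap in the _mm256_permutexvar_ps/epi32 aliases above, which
 * map onto the older AVX2 permutevar8x32 intrinsics.  Sketch:
 *
 *   #include <immintrin.h>
 *
 *   static inline __m256i reverse_epi32(__m256i v) {
 *     __m256i idx = _mm256_setr_epi32(7, 6, 5, 4, 3, 2, 1, 0);
 *     return _mm256_permutexvar_epi32(idx, v);
 *   }
 */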
#define _mm_alignr_epi32(A, B, imm) \
  (__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
                                    (__v4si)(__m128i)(B), (int)(imm))

#define _mm_mask_alignr_epi32(W, U, A, B, imm) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                      (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
                                      (__v4si)(__m128i)(W))

#define _mm_maskz_alignr_epi32(U, A, B, imm) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                      (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
                                      (__v4si)_mm_setzero_si128())

#define _mm256_alignr_epi32(A, B, imm) \
  (__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
                                    (__v8si)(__m256i)(B), (int)(imm))

#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
                                      (__v8si)(__m256i)(W))

#define _mm256_maskz_alignr_epi32(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm_alignr_epi64(A, B, imm) \
  (__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
                                    (__v2di)(__m128i)(B), (int)(imm))

#define _mm_mask_alignr_epi64(W, U, A, B, imm) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                      (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
                                      (__v2di)(__m128i)(W))

#define _mm_maskz_alignr_epi64(U, A, B, imm) \
  (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                      (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
                                      (__v2di)_mm_setzero_si128())

#define _mm256_alignr_epi64(A, B, imm) \
  (__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
                                    (__v4di)(__m256i)(B), (int)(imm))

#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
                                      (__v4di)(__m256i)(W))

#define _mm256_maskz_alignr_epi64(U, A, B, imm) \
  (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                      (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
                                      (__v4di)_mm256_setzero_si256())
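/* alignr_epi32/epi64 concatenate A:B (A high) and shift right by whole
 * elements, unlike SSE's byte-granular palignr.  Sketch:
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128i shift_in_one(__m128i a, __m128i b) {
 *     // result lanes = { a1, a2, a3, b0 }
 *     return _mm_alignr_epi32(b, a, 1);
 *   }
 */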
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
#define _mm256_mask_shuffle_epi32(W, U, A, I) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_shuffle_epi32((A), (I)), \
                                      (__v8si)(__m256i)(W))

#define _mm256_maskz_shuffle_epi32(U, A, I) \
  (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                      (__v8si)_mm256_shuffle_epi32((A), (I)), \
                                      (__v8si)_mm256_setzero_si256())

#define _mm_mask_shuffle_epi32(W, U, A, I) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                      (__v4si)_mm_shuffle_epi32((A), (I)), \
                                      (__v4si)(__m128i)(W))

#define _mm_maskz_shuffle_epi32(U, A, I) \
  (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                      (__v4si)_mm_shuffle_epi32((A), (I)), \
                                      (__v4si)_mm_setzero_si128())

return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
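/* vcvtph2ps/vcvtps2ph convert between IEEE binary16 storage and float:
 * ps->ph takes a rounding-mode immediate, ph->ps is exact.  Sketch using
 * the unmasked F16C forms (assumes F16C is also enabled):
 *
 *   #include <immintrin.h>
 *
 *   static inline __m128 roundtrip_f16(__m128 x) {
 *     __m128i h = _mm_cvtps_ph(x, _MM_FROUND_TO_NEAREST_INT |
 *                                 _MM_FROUND_NO_EXC);
 *     return _mm_cvtph_ps(h);  // four halves back to four floats
 *   }
 */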
#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
  (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
                                         (__v8hi)(__m128i)(W), \
                                         (__mmask8)(U))

#define _mm_maskz_cvt_roundps_ph(U, A, I) \
  (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
                                         (__v8hi)_mm_setzero_si128(), \
                                         (__mmask8)(U))

#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
  (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
                                            (__v8hi)(__m128i)(W), \
                                            (__mmask8)(U))

#define _mm256_maskz_cvt_roundps_ph(U, A, I) \
  (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
                                            (__v8hi)_mm_setzero_si128(), \
                                            (__mmask8)(U))

#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256

#endif /* __AVX512VLINTRIN_H */
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_compress_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_and_epi64(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_or_epi64(__m128i __a, __m128i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_abs_epi64(__m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_scalef_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_compress_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a, int __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_ps(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_load_epi64(void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rorv_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_getexp_pd(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_compress_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttps_epu32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi8(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B)
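The two-source permute reads each index as a selector into the concatenation of both operands: with eight float lanes, indices 0-7 pick from the first source and 8-15 from the second. A minimal sketch (illustrative values):

    __m256 a = _mm256_set1_ps(1.0f);
    __m256 b = _mm256_set1_ps(2.0f);
    __m256i idx = _mm256_set_epi32(15, 6, 13, 4, 11, 2, 9, 0);  /* lanes low->high: 0, 9, 2, 11, ... */
    __m256 r = _mm256_permutex2var_ps(a, idx, b);               /* alternates 1.0, 2.0, 1.0, 2.0, ... */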
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epi64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_permutevar_pd(__m256d __a, __m256i __c)
Copies the values in a 256-bit vector of [4 x double] as specified by the 256-bit integer vector oper...
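For the [4 x double] variant the selector is bit 1 of each 64-bit control element, and selection never crosses a 128-bit lane. A sketch that swaps the pair within each lane:

    __m256d a = _mm256_set_pd(3.0, 2.0, 1.0, 0.0);  /* lanes low->high: 0, 1, 2, 3 */
    __m256i c = _mm256_set_epi64x(0, 2, 0, 2);      /* bit 1 set = take the lane's upper element */
    __m256d r = _mm256_permutevar_pd(a, c);         /* lanes low->high: 1, 0, 3, 2 */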
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_set1_epi32(__mmask8 __M, int __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_load_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu16_epi64(__m128i __V)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_setzero_ps(void)
Constructs a 256-bit floating-point vector of [8 x float] with all vector elements initialized to zer...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_set1_epi32(__mmask8 __M, int __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rolv_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_getexp_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_rcp14_ps(__m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_abs_epi32(__m256i __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srav_epi64(__m128i __X, __m128i __Y)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_unpackhi_ps(__m128 __a, __m128 __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x float] and interleaves the...
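The unpack high/low pair are the usual interleave building blocks; a minimal sketch (illustrative values):

    __m128 a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);  /* lanes low->high: 0, 1, 2, 3 */
    __m128 b = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f);  /* lanes low->high: 4, 5, 6, 7 */
    __m128 hi = _mm_unpackhi_ps(a, b);              /* a2, b2, a3, b3 -> 2, 6, 3, 7 */
    __m128 lo = _mm_unpacklo_ps(a, b);              /* a0, b0, a1, b1 -> 0, 4, 1, 5 */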
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srlv_epi32(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_abs_epi64(__m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_broadcast_f32x4(__m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a)
Calculates the square root of each of the two values stored in a 128-bit vector of [2 x double]...
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastss_ps(__m256 __O, __mmask8 __M, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_rsqrt14_pd(__m256d __A)
#define _mm_mask_cmpneq_epi64_mask(k, A, B)
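These comparison macros return a __mmask8 bitmask (bit i reflects lane i) rather than an all-ones/all-zeros vector; the mask then drives any masked or zero-masked operation. A minimal sketch:

    __m256i a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
    __m256i b = _mm256_set1_epi32(3);
    __mmask8 k = _mm256_cmpgt_epi32_mask(a, b);  /* lanes 4..7 exceed 3 -> k = 0xF0 */
    __m256i r = _mm256_maskz_mov_epi32(k, a);    /* keep lanes 4..7, zero lanes 0..3 */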
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, __m128i __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits...
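Unlike _mm_slli_epi32, the count here is taken from the low 64 bits of a vector operand, so it can be a runtime value. A sketch:

    __m128i v   = _mm_set1_epi32(1);
    __m128i cnt = _mm_cvtsi32_si128(4);   /* count in the low 64 bits */
    __m128i r   = _mm_sll_epi32(v, cnt);  /* every lane: 1 << 4 = 16 */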
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rorv_epi32(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_min_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the lesser of each pair of values...
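A minimal min/max sketch (illustrative values):

    __m128 a = _mm_set_ps(4.0f, -1.0f, 2.0f, 8.0f);  /* lanes low->high: 8, 2, -1, 4 */
    __m128 b = _mm_set_ps(3.0f,  5.0f, 2.5f, 7.0f);  /* lanes low->high: 7, 2.5, 5, 3 */
    __m128 lo = _mm_min_ps(a, b);                    /* 7, 2, -1, 3 */
    __m128 hi = _mm_max_ps(a, b);                    /* 8, 2.5, 5, 4 */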
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastd_epi32(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_moveldup_ps(__m256 __a)
Moves and duplicates even-indexed values from a 256-bit vector of [8 x float] to float values in a 25...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_xor_epi64(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a)
Converts the lower two integer elements of a 128-bit vector of [4 x i32] into two double-precision fl...
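A sketch; the upper two source elements are ignored:

    __m128i v = _mm_set_epi32(9, 8, -3, 7);  /* only lanes 0 and 1 (7 and -3) are converted */
    __m128d d = _mm_cvtepi32_pd(v);          /* lanes low->high: 7.0, -3.0 */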
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a, int __count)
Right-shifts each of the 32-bit values in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_add_ps(__m128 __a, __m128 __b)
Adds two 128-bit vectors of [4 x float], and returns the results of the addition. ...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V)
Sign-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] t...
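Contrasted with the zero-extending _mm_cvtepu16_epi64 listed elsewhere in this header, a sketch:

    __m128i v = _mm_set_epi16(0, 0, 0, 0, 0, 0, -2, 1);  /* low two i16 lanes: 1, -2 */
    __m128i s = _mm_cvtepi16_epi64(v);  /* sign-extended: 1, -2 */
    __m128i u = _mm_cvtepu16_epi64(v);  /* zero-extended: 1, 65534 (0xFFFE) */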
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q)
Initializes both values in a 128-bit integer vector with the specified 64-bit integer value...
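A sketch, paired with the zero-masked broadcast from this header:

    __m128i q = _mm_set1_epi64x(42LL);             /* both i64 lanes = 42 */
    __m256i z = _mm256_maskz_set1_epi64(0x3, 42);  /* lanes 0, 1 = 42; lanes 2, 3 zeroed */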
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_pd(__mmask8 __U, __m128 __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_load_epi64(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_epi32(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srl_epi64(__m256i __a, __m128i __count)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_pd(__m256d __W, __mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_getexp_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_or_epi32(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_or_epi32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_blend_epi32(__mmask8 __U, __m256i __A, __m256i __W)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_rsqrt14_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_getexp_ps(__m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_pd(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_abs_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_scalef_pd(__m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtpd_epu32(__m256d __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rolv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_store_epi64(void *__P, __m128i __A)
#define _mm256_cmpeq_epi64_mask(A, B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_ps(__m256 __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [4 x i32], saving the lower 32 bits of each...
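The mask/maskz variants follow the usual AVX-512 convention: masked-off lanes are taken from the passthrough operand (_mm_mask_*) or zeroed (_mm_maskz_*). A sketch:

    __m128i a   = _mm_set_epi32(40, 30, 20, 10);
    __m128i b   = _mm_set_epi32(4, 3, 2, 1);
    __m128i src = _mm_set1_epi32(-1);
    __m128i sum = _mm_add_epi32(a, b);                 /* 11, 22, 33, 44 */
    __m128i m   = _mm_mask_add_epi32(src, 0x5, a, b);  /* 11, -1, 33, -1 */
    __m128i z   = _mm_maskz_add_epi32(0x5, a, b);      /* 11, 0, 33, 0 */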
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_mul_ps(__m256 __a, __m256 __b)
Multiplies two 256-bit vectors of [8 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_blend_ps(__mmask8 __U, __m128 __A, __m128 __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpackhi_epi64(__m256i __a, __m256i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpacklo_epi32(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expand_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi32(void *__P, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_epi32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a, __m128i __b)
Subtracts the corresponding elements of two [2 x i64] vectors.
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpackhi_pd(__m256d __a, __m256d __b)
Unpacks the odd-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_scalef_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttps_epi32(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtps_epu32(__m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a, int __count)
Right-shifts each of the 64-bit values in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_abs_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_max_epu64(__m128i __A, __m128i __B)
#define _mm256_mask_cmpeq_epi64_mask(k, A, B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epu64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpackhi_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the two 256-bit vectors of [8 x float] ...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rorv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
#define _mm256_cmpneq_epi64_mask(A, B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_broadcastss_ps(__m128 __O, __mmask8 __M, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_unpacklo_ps(__m128 __a, __m128 __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x float] and interleaves them...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a, __m128d __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them i...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srai_epi64(__m256i __A, int __imm)
static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi32(void *__P, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi32_mask(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_rsqrt14_pd(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_xor_epi32(__m128i __a, __m128i __b)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_rcp14_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srai_epi32(__m256i __a, int __count)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector ...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sub_ps(__m128 __a, __m128 __b)
Subtracts each of the values of the second operand from the first operand, both of which are 128-bit ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a, __m128i __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x i32] and interleaves them ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_abs_epi32(__m128i __a)
Computes the absolute value of each of the packed 32-bit signed integers in the source operand and st...
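A sketch (assumes <limits.h>); note the usual two's-complement caveat that INT_MIN maps to itself:

    __m128i v = _mm_set_epi32(INT_MIN, -7, 0, 7);  /* lanes low->high: 7, 0, -7, INT_MIN */
    __m128i r = _mm_abs_epi32(v);                  /* 7, 0, 7, INT_MIN */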
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sll_epi32(__m256i __a, __m128i __count)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A)
#define _mm256_cmpeq_epi32_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V)
Zero-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] t...
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi32(__m256i __A)
#define _mm256_mask_cmpneq_epi64_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_epu32(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_moveldup_ps(__m128 __a)
Duplicates even-indexed values from a 128-bit vector of [4 x float] to float values stored in a 128-b...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srai_epi64(__m128i __A, int __imm)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi8(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvtpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V)
Sign-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] t...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttpd_epu32(__m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttpd_epu32(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi32(void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_ps(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_rsqrt14_ps(__m256 __A)
#define _mm_cmpneq_epi32_mask(A, B)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi64x(long long __q)
Constructs a 256-bit integer vector of [4 x i64], with each of the 64-bit integral vector elements se...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srlv_epi64(__m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttpd_epi32(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_rcp14_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtepu32_ps(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sllv_epi64(__m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi16(__m128i __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a)
Calculates the square roots of the values in a 256-bit vector of [4 x double].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srli_epi64(__m256i __a, int __count)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sll_epi64(__m256i __a, __m128i __count)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_and_epi32(__m256i __a, __m256i __b)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_pd(__m128d __W, __mmask8 __U, __m128 __A)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_store_epi64(void *__P, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mul_epu32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srav_epi64(__m256i __X, __m256i __Y)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sub_pd(__m256d __a, __m256d __b)
Subtracts two 256-bit vectors of [4 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi32(__m128i __A)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_store_epi32(void *__P, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epi64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_or_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_max_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the greater of each pair of values...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rorv_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sub_ps(__m256 __a, __m256 __b)
Subtracts two 256-bit vectors of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtepu32_ps(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_min_epu64(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_expand_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_min_epi64(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_scalef_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttps_epu32(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_compress_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rolv_epi64(__m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi32_epi16(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi32_epi64(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rolv_epi32(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, __m128i __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_compress_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_blend_pd(__mmask8 __U, __m128d __A, __m128d __W)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi64(void *__P, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding elements of two 128-bit vectors of [4 x i32] and returns the lower 32 bits of...
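Contrast with _mm_mul_epi32, which produces two full 64-bit products from the even lanes. A sketch:

    __m128i a    = _mm_set_epi32(4, 3, 2, 100000);
    __m128i lo   = _mm_mullo_epi32(a, a);  /* low halves: lane 0 = 10^10 mod 2^32 = 1410065408 */
    __m128i wide = _mm_mul_epi32(a, a);    /* lanes 0 and 2 widened: 10000000000 and 9 as i64 */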
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastss_ps(__mmask8 __M, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expand_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_getexp_pd(__m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_ps(__mmask8 __U, __m256i __A)
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvttpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32], truncating the result b...
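The cvt/cvtt pair differ only in rounding: _mm256_cvtpd_epi32 uses the current rounding mode (round-to-nearest-even by default), while the `t` form truncates toward zero. A sketch:

    __m256d v  = _mm256_set_pd(-1.5, 2.5, 2.7, -2.7);  /* lanes low->high: -2.7, 2.7, 2.5, -1.5 */
    __m128i rn = _mm256_cvtpd_epi32(v);                /* -3, 3, 2, -2 (ties to even) */
    __m128i tr = _mm256_cvttpd_epi32(v);               /* -2, 2, 2, -1 */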
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ps(__m128 __a)
Calculates the square roots of the values stored in a 128-bit vector of [4 x float].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_add_ps(__m256 __a, __m256 __b)
Adds two 256-bit vectors of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi32_epi16(__m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_rsqrt14_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_expand_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_scalef_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi32(__m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_blend_ps(__mmask8 __U, __m256 __A, __m256 __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epi64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi16_epi64(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srlv_epi32(__m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rorv_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_store_epi32(void *__P, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi32_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu16_epi32(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_broadcastq_epi64(__m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi8_epi64(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_broadcastss_ps(__m128 __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a, int __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits...
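The arithmetic shift replicates the sign bit, unlike the logical _mm_srli_epi32. A sketch:

    __m128i v = _mm_set1_epi32(-16);   /* 0xFFFFFFF0 */
    __m128i a = _mm_srai_epi32(v, 2);  /* sign-filled: every lane -4 */
    __m128i l = _mm_srli_epi32(v, 2);  /* zero-filled: every lane 0x3FFFFFFC */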
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a, __m128i __b)
Subtracts the corresponding 32-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
#define _mm_mask_cmpeq_epi64_mask(k, A, B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_epu32(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rorv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_min_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the lesser of each pair of values...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A)
typedef short __v2hi __attribute__((__vector_size__(4)))
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_setzero_ps(void)
Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_epi32(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_epu32(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_movedup_pd(__m256d __a)
Moves and duplicates double-precision floating point values from a 256-bit vector of [4 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rolv_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sub_epi64(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_epi64(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi32(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a, __m128d __b)
Subtracts two 128-bit vectors of [2 x double].
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_permutevar_ps(__m128 __a, __m128i __c)
Copies the values stored in a 128-bit vector of [4 x float] as specified by the 128-bit integer vecto...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a)
Converts the lower two single-precision floating-point elements of a 128-bit vector of [4 x float] in...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtepu32_pd(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [2 x i64], saving the lower 64 bits of each...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epi64(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_xor_epi64(__m128i __a, __m128i __b)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epu32(__m256i __a, __m256i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_rsqrt14_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srlv_epi64(__m256i __X, __m256i __Y)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sra_epi64(__m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32], truncating the result when it is inexact...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sllv_epi32(__m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_epi32(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a, int __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding even-indexed elements of two 128-bit vectors of [4 x i32] and returns a 128-...
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_scalef_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_or_epi64(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_andnot_epi64(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mul_epi32(__m256i __a, __m256i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_pd(__m256d __W, __mmask8 __U, __m256i __X, __m256d __Y)
#define _mm_mask_cmpeq_epi32_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi32_epi16(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu8_epi64(__m128i __V)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi16(__m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_getexp_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi32_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a, __m128d __b)
Adds two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_rcp14_pd(__m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_broadcastss_ps(__m128 __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rolv_epi64(__m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpackhi_epi32(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epi32(__m256i __a, __m256i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_scalef_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V)
Zero-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to...
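A sketch of the zero-extension (hypothetical helper widen_u8_to_u64; SSE4.1 assumed):

    #include <immintrin.h>

    /* Zero-extends bytes 0 and 1 of the source into the two 64-bit lanes.
       With bytes {0x80, 0x01, ...} the result is { 128, 1 }. */
    static __m128i widen_u8_to_u64(void) {
        __m128i v = _mm_setr_epi8((char)0x80, 0x01, 0, 0, 0, 0, 0, 0,
                                  0, 0, 0, 0, 0, 0, 0, 0);
        return _mm_cvtepu8_epi64(v);
    }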
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_unpacklo_epi64(__m256i __a, __m256i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_compress_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_and_epi64(__m128i __a, __m128i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_test_epi32_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline __m128 __DEFAULT_FN_ATTRS _mm256_cvtpd_ps(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x float].
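A minimal sketch of the narrowing conversion (hypothetical helper narrow_demo; AVX assumed):

    #include <immintrin.h>

    /* Rounds four doubles down to four floats per the current rounding mode. */
    static __m128 narrow_demo(void) {
        __m256d d = _mm256_set_pd(4.0, 3.0, 2.0, 1.0); /* high lane first */
        return _mm256_cvtpd_ps(d);                     /* {1.f, 2.f, 3.f, 4.f} */
    }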
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_test_epi64_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
#define __DEFAULT_FN_ATTRS256
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epu64(__m256i __A, __m256i __B)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_loadu_epi32(void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastss_ps(__mmask8 __M, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mullo_epi32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi64(void *__P, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a, __m128d __b)
Performs an element-by-element division of two 128-bit vectors of [2 x double].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttps_epi32(__mmask8 __U, __m256 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a, __m128i __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into...
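A sketch of the interleave pattern (hypothetical helper lo_interleave; SSE2 assumed):

    #include <immintrin.h>

    /* Interleaving the low 64-bit lanes of {a0, a1} and {b0, b1}
       yields {a0, b0}; here that is {1, 3}. */
    static __m128i lo_interleave(void) {
        __m128i a = _mm_set_epi64x(2, 1); /* {a0=1, a1=2} */
        __m128i b = _mm_set_epi64x(4, 3); /* {b0=3, b1=4} */
        return _mm_unpacklo_epi64(a, b);
    }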
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_rsqrt14_ps(__m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_rcp14_ps(__m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi8(__m128i __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_unpacklo_pd(__m256d __a, __m256d __b)
Unpacks the even-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves the...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_andnot_epi64(__m256i __A, __m256i __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_div_ps(__m256 __a, __m256 __b)
Divides two 256-bit vectors of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srav_epi32(__m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_max_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the greater of each pair of values...
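One common use is a branch-free clamp; a sketch under AVX (hypothetical helper clamp01):

    #include <immintrin.h>

    /* Lane-wise maximum; max(min(x, hi), lo) clamps each float to [0, 1]. */
    static __m256 clamp01(__m256 x) {
        return _mm256_max_ps(_mm256_min_ps(x, _mm256_set1_ps(1.0f)),
                             _mm256_set1_ps(0.0f));
    }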
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi32_epi8(__m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a, __m128d __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them ...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expand_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_and_si256(__m256i __a, __m256i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_andnot_epi32(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_broadcastd_epi32(__m128i __X)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_cvtepi32_ps(__m256i __a)
Converts a vector of [8 x i32] into a vector of [8 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V)
Zero-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits...
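A sketch contrasting the arithmetic shift with the logical one (hypothetical helper sra_demo; SSE2 assumed):

    #include <immintrin.h>

    /* Arithmetic shift fills with the sign bit, unlike _mm_srl_epi32,
       which fills with zeros: -8 >> 1 stays negative here. */
    static __m128i sra_demo(void) {
        __m128i v     = _mm_set1_epi32(-8);
        __m128i count = _mm_cvtsi32_si128(1); /* shift amount in the low 64 bits */
        return _mm_sra_epi32(v, count);       /* all lanes become -4 */
    }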
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rolv_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_expand_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A)
#define _mm256_mask_cmpeq_epi32_mask(k, A, B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_movedup_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rorv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_slli_epi64(__m256i __a, int __count)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_expand_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rorv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_getexp_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_mov_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_ph(__mmask8 __U, __m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi32(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V)
Sign-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to...
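A sketch contrasting this with the zero-extending form (hypothetical helper widen_s8_to_s64; SSE4.1 assumed):

    #include <immintrin.h>

    /* Sign-extension counterpart of _mm_cvtepu8_epi64: the byte 0x80
       becomes -128 rather than 128. Result here is { -128, 1 }. */
    static __m128i widen_s8_to_s64(void) {
        __m128i v = _mm_setr_epi8((char)0x80, 0x01, 0, 0, 0, 0, 0, 0,
                                  0, 0, 0, 0, 0, 0, 0, 0);
        return _mm_cvtepi8_epi64(v);
    }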
#define _mm256_permutexvar_epi32(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_permutexvar_pd(__m256i __X, __m256d __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
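A sketch of a typical masking use (hypothetical helper low_bytes; SSE2 assumed):

    #include <immintrin.h>

    /* Bitwise AND is element-type-agnostic; a common use is masking,
       e.g. keeping only the low byte of each 32-bit element. */
    static __m128i low_bytes(__m128i v) {
        return _mm_and_si128(v, _mm_set1_epi32(0xFF));
    }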
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu32_epi64(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_epi32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_div_pd(__m256d __a, __m256d __b)
Divides two 256-bit vectors of [4 x double].
#define _mm256_permutexvar_ps(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epi64(__m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_and_epi32(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srav_epi32(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi16_epi32(__m128i __V)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_max_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, __m128i __count)
Right-shifts each of the 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a, __m128i __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x i32] and interleaves them i...
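A sketch of the resulting lane order (hypothetical helper lo_interleave32; SSE2 assumed):

    #include <immintrin.h>

    /* Interleaves lanes 0 and 1 of each source: the result is
       {a0, b0, a1, b1}, here {1, 5, 2, 6}. */
    static __m128i lo_interleave32(void) {
        __m128i a = _mm_setr_epi32(1, 2, 3, 4);
        __m128i b = _mm_setr_epi32(5, 6, 7, 8);
        return _mm_unpacklo_epi32(a, b);
    }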
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi64_mask(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_ps(__m256 __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_load_ps(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rolv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt14_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_rsqrt14_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns the vec...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_compress_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi32(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srl_epi32(__m256i __a, __m128i __count)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_compress_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_add_epi32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rorv_epi32(__m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expand_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_scalef_ps(__m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_mul_pd(__m256d __a, __m256d __b)
Multiplies two 256-bit vectors of [4 x double].
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mul_ps(__m128 __a, __m128 __b)
Multiplies two 128-bit vectors of [4 x float] and returns the results of the multiplication.
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_broadcastq_epi64(__m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_div_ps(__m128 __a, __m128 __b)
Divides two 128-bit vectors of [4 x float].
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_rcp14_pd(__mmask8 __U, __m128d __A)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a)
Calculates the square roots of the values in a 256-bit vector of [8 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
#define _mm_cmpeq_epi32_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_epu32(__mmask8 __U, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_broadcastsd_pd(__m128d __X)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_blend_epi64(__mmask8 __U, __m256i __A, __m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi32_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_slli_epi32(__m256i __a, int __count)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a, __m128i __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the corresponding elements o...
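A sketch of the widening unsigned multiply (hypothetical helper widening_umul; SSE2 assumed):

    #include <immintrin.h>

    /* Unsigned counterpart of _mm_mul_epi32: lanes 0 and 2 are multiplied
       as unsigned 32-bit values into two 64-bit products, so
       0xFFFFFFFF * 2 does not wrap. */
    static __m128i widening_umul(void) {
        __m128i a = _mm_setr_epi32((int)0xFFFFFFFF, 0, 7, 0);
        __m128i b = _mm_setr_epi32(2, 0, 3, 0);
        return _mm_mul_epu32(a, b);   /* { 0x1FFFFFFFE, 21 } */
    }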
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_rsqrt14_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_compress_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
#define __DEFAULT_FN_ATTRS128
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_add_epi64(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a, __m128i __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them int...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rolv_epi32(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_scalef_pd(__m128d __A, __m128d __B)
#define _MM_FROUND_CUR_DIRECTION
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_rcp14_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_getexp_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i)
Initializes all values in a 128-bit vector of [4 x i32] with the specified 32-bit value...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_add_pd(__m256d __a, __m256d __b)
Adds two 256-bit vectors of [4 x double].
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_loadu_epi64(void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sub_epi32(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_movedup_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepu8_epi32(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_epu32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epi32(__m256i __a, __m256i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi8(__mmask8 __M, __m256i __A)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_set1_epi32(int __i)
Constructs a 256-bit integer vector of [8 x i32], with each of the 32-bit integral vector elements se...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srli_epi32(__m256i __a, int __count)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_permutevar_ps(__m256 __a, __m256i __c)
Copies the values stored in a 256-bit vector of [8 x float] as specified by the 256-bit integer vecto...
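Worth noting that the permute stays within each 128-bit half; a sketch (hypothetical helper splat_halves; AVX assumed):

    #include <immintrin.h>

    /* Each control element selects, via its low two bits, one float from
       the same 128-bit half; selection never crosses the two halves.
       An all-zero control broadcasts element 0 of each half. */
    static __m256 splat_halves(__m256 a) {
        return _mm256_permutevar_ps(a, _mm256_setzero_si256());
    }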
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epu32(__m256i __a, __m256i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, __m256d __B)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvttps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32], truncating the result by rounding toward...
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_broadcast_i32x4(__m128i __A)
#define _mm_cmpneq_epi64_mask(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector ...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttps_epi32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt14_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rorv_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_mov_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_movedup_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtps_epu32(__m256 __A)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_ph(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_scalef_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_movehdup_ps(__m128 __a)
Moves and duplicates odd-indexed values from a 128-bit vector of [4 x float] to float values stored i...
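A sketch (hypothetical helper dup_odd; SSE3 assumed):

    #include <immintrin.h>

    /* Duplicates the odd-indexed floats: {x0, x1, x2, x3} -> {x1, x1, x3, x3}.
       Paired with _mm_moveldup_ps, this is a standard building block for
       vectorized complex multiplication. */
    static __m128 dup_odd(__m128 x) {
        return _mm_movehdup_ps(x);
    }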
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_movedup_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epu64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rolv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_testn_epi32_mask(__m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi64_mask(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi32_epi8(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttps_epu32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_ps(__mmask8 __U, __m128d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_getexp_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sra_epi32(__m256i __a, __m128i __count)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V)
Sign-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] ...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_testn_epi64_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_compress_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_movedup_pd(__m128d __a)
Moves and duplicates the double-precision value in the lower bits of a 128-bit vector of [2 x double]...
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_rcp14_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi64(__mmask8 __U, __m128i __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_load_epi32(void const *__P)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_epi32(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi32_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, __m128i __count)
Right-shifts each of the 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V)
Sign-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtepi32_pd(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x double].
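A sketch of the widening conversion (hypothetical helper widen_demo; AVX assumed):

    #include <immintrin.h>

    /* Widens four 32-bit integers to four doubles; every int32 value is
       exactly representable in a double, so the conversion is lossless. */
    static __m256d widen_demo(void) {
        __m128i v = _mm_setr_epi32(1, -2, 3, -4);
        return _mm256_cvtepi32_pd(v);  /* {1.0, -2.0, 3.0, -4.0} */
    }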
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtepi8_epi32(__m128i __V)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtpd_epu32(__m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sllv_epi32(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_getexp_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sllv_epi64(__m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_andnot_epi32(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
#define _mm256_cmpneq_epi32_mask(A, B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi64(void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expand_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_unpacklo_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the two 256-bit vectors of [8 x float] ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srav_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutexvar_epi64(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvtps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_broadcastd_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_rcp14_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rolv_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_setzero_si256(void)
Constructs a 256-bit integer vector initialized to zero.
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvttpd_epu32(__m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_epu32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_movehdup_ps(__m256 __a)
Moves and duplicates odd-indexed values from a 256-bit vector of [8 x float] to float values in a 256...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastq_epi64(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_cvtepu32_pd(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns the vec...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_blend_epi64(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_load_epi32(void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epi64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V)
Zero-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] t...
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_getexp_ps(__mmask8 __U, __m128 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_scalef_ps(__m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_rcp14_pd(__m256d __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epu64(__m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi32_mask(__m128i __A, __m128i __B)
static __inline __m128d __DEFAULT_FN_ATTRS128 _mm_permutevar_pd(__m128d __a, __m128i __c)
Copies the values in a 128-bit vector of [2 x double] as specified by the 128-bit integer vector oper...
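The control encoding here is a frequent pitfall, so a sketch (hypothetical helper swap_pd; AVX assumed):

    #include <immintrin.h>

    /* For _mm_permutevar_pd the selector is bit 1 (not bit 0) of each
       64-bit control element. A control of {2, 0} therefore swaps the
       two doubles. */
    static __m128d swap_pd(__m128d a) {
        return _mm_permutevar_pd(a, _mm_set_epi64x(0, 2));
    }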
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_pd(__mmask8 __U, __m256i __X, __m256d __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sra_epi64(__m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V)
Zero-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttpd_epu32(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_compress_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expand_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epu64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi32_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a, __m128d __b)
Multiplies two 128-bit vectors of [2 x double].
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvttps_epu32(__m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_setzero_pd(void)
Constructs a 256-bit floating-point vector of [4 x double] with all vector elements initialized to ze...
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi32_epi8(__m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_scalef_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttps_epu32(__m128 __A)
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_max_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the greater of each pair of values...
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_load_ps(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_broadcastd_epi32(__m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rolv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_epi64(__mmask8 __U, __m256i __A)
#define _mm256_mask_cmpneq_epi32_mask(k, A, B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_rcp14_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_xor_epi32(__m256i __a, __m256i __b)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rorv_epi64(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_broadcastq_epi64(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_load_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_blend_pd(__mmask8 __U, __m256d __A, __m256d __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
#define _mm_mask_cmpneq_epi32_mask(k, A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttps_epi32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_compress_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m256d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, __m256d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttpd_epi32(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_expand_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rorv_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi8(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi32_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_rcp14_ps(__mmask8 __U, __m128 __A)
/// Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_cvtps_pd(__m128 __a)
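/* A minimal usage sketch (assumption: a standalone test program, not part of
   this header): widens four single-precision lanes to four double-precision
   lanes. Compile with -mavx. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 f = _mm_setr_ps(1.5f, 2.5f, 3.5f, 4.5f);
    __m256d d = _mm256_cvtps_pd(f);   /* VCVTPS2PD: 4 x float -> 4 x double */
    double out[4];
    _mm256_storeu_pd(out, d);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 1.5 2.5 3.5 4.5 */
    return 0;
}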
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_getexp_ps(__m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epu64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
/// Compares two 256-bit vectors of [4 x double] and returns the lesser of each pair of values...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_min_pd(__m256d __a, __m256d __b)
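/* A minimal usage sketch (assumption: a standalone test program, not part of
   this header): takes the per-lane minimum of two [4 x double] vectors.
   Compile with -mavx. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d a = _mm256_setr_pd(1.0, 5.0, 3.0, 7.0);
    __m256d b = _mm256_setr_pd(2.0, 4.0, 6.0, 0.0);
    __m256d m = _mm256_min_pd(a, b);  /* VMINPD: lesser of each pair */
    double out[4];
    _mm256_storeu_pd(out, m);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 1 4 3 0 */
    return 0;
}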
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_expand_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A)