#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1")))

#define _MM_FROUND_TO_NEAREST_INT    0x00
#define _MM_FROUND_TO_NEG_INF        0x01
#define _MM_FROUND_TO_POS_INF        0x02
#define _MM_FROUND_TO_ZERO           0x03
#define _MM_FROUND_CUR_DIRECTION     0x04

#define _MM_FROUND_RAISE_EXC         0x00
#define _MM_FROUND_NO_EXC            0x08

#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)

#define _mm_ceil_ps(X)     _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X)     _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y)  _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y)  _mm_round_sd((X), (Y), _MM_FROUND_CEIL)

#define _mm_floor_ps(X)    _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X)    _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)

#define _mm_round_ps(X, M) __extension__ ({ \
  (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); })

#define _mm_round_ss(X, Y, M) __extension__ ({ \
  (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
                                 (__v4sf)(__m128)(Y), (M)); })

#define _mm_round_pd(X, M) __extension__ ({ \
  (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); })

#define _mm_round_sd(X, Y, M) __extension__ ({ \
  (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
                                  (__v2df)(__m128d)(Y), (M)); })

#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
  (__m128d)__builtin_shufflevector((__v2df)(__m128d)(V1), \
                                   (__v2df)(__m128d)(V2), \
                                   (((M) & 0x01) ? 2 : 0), \
                                   (((M) & 0x02) ? 3 : 1)); })

#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
  (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
                                  (((M) & 0x01) ? 4 : 0), \
                                  (((M) & 0x02) ? 5 : 1), \
                                  (((M) & 0x04) ? 6 : 2), \
                                  (((M) & 0x08) ? 7 : 3)); })
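The ceil/floor forms above are just _mm_round_* with a fixed mode; modes can also be combined directly from the _MM_FROUND_* flags. A small usage sketch (the function and variable names are illustrative, not part of the header; compile with SSE4.1 enabled, e.g. -msse4.1):

#include <smmintrin.h>

/* Round four lanes down, up, and to nearest, suppressing precision
   exceptions for the explicit-mode call. */
static __m128 round_three_ways(__m128 v)
{
  __m128 down    = _mm_floor_ps(v);                      /* _MM_FROUND_FLOOR */
  __m128 up      = _mm_ceil_ps(v);                       /* _MM_FROUND_CEIL  */
  __m128 nearest = _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT |
                                   _MM_FROUND_NO_EXC);
  return _mm_add_ps(down, _mm_add_ps(up, nearest));
}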
/* Body of _mm_blendv_pd. */
return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2, (__v2df)__M);
/* Body of _mm_blendv_ps. */
return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2, (__v4sf)__M);
/* Body of _mm_blendv_epi8. */
return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2, (__v16qi)__M);
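_mm_blend_ps selects lanes by a compile-time immediate, while the _mm_blendv_* forms select by the sign bit of each element of a runtime mask. A hedged sketch of both styles (the helper names are illustrative):

#include <smmintrin.h>

/* Per-lane selection: take b[i] where b[i] > a[i], else a[i].
   The comparison sets all bits (including the sign bit) in lanes
   where the condition holds, which is what blendv keys on. */
static __m128 select_greater(__m128 a, __m128 b)
{
  __m128 mask = _mm_cmpgt_ps(b, a);
  return _mm_blendv_ps(a, b, mask);
}

/* With a constant choice the immediate form suffices:
   0x5 = 0b0101 takes lanes 0 and 2 from b, lanes 1 and 3 from a. */
static __m128 mix_constant(__m128 a, __m128 b)
{
  return _mm_blend_ps(a, b, 0x5);
}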
#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
  (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(V1), \
                                   (__v8hi)(__m128i)(V2), \
                                   (((M) & 0x01) ?  8 : 0), \
                                   (((M) & 0x02) ?  9 : 1), \
                                   (((M) & 0x04) ? 10 : 2), \
                                   (((M) & 0x08) ? 11 : 3), \
                                   (((M) & 0x10) ? 12 : 4), \
                                   (((M) & 0x20) ? 13 : 5), \
                                   (((M) & 0x40) ? 14 : 6), \
                                   (((M) & 0x80) ? 15 : 7)); })

/* Body of _mm_mullo_epi32. */
return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
/* Body of _mm_mul_epi32. */
return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
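The two multiplies differ in what they keep: _mm_mullo_epi32 returns the low 32 bits of all four products, while _mm_mul_epi32 multiplies only elements 0 and 2 and returns the full signed 64-bit products. A brief contrast (names illustrative):

#include <smmintrin.h>

static void multiply_both_ways(__m128i a, __m128i b,
                               __m128i *low32, __m128i *wide64)
{
  *low32  = _mm_mullo_epi32(a, b);  /* 4 x i32, high halves discarded      */
  *wide64 = _mm_mul_epi32(a, b);    /* 2 x i64, from even-indexed elements */
}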
#define _mm_dp_ps(X, Y, M) __extension__ ({ \
  (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
                               (__v4sf)(__m128)(Y), (M)); })

#define _mm_dp_pd(X, Y, M) __extension__ ({ \
  (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
                                (__v2df)(__m128d)(Y), (M)); })

/* Body of _mm_stream_load_si128. */
return (__m128i) __builtin_nontemporal_load ((const __v2di *) __V);
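In the _mm_dp_ps mask, the high four bits choose which element pairs are multiplied and summed, and the low four bits choose which result lanes receive that sum (the others become zero). A sketch of a full 4-element dot product broadcast to every lane, using the usual 0xFF mask (the wrapper name is illustrative):

#include <smmintrin.h>

/* Dot product of two 4-element vectors: high nibble 0xF uses all four
   products, low nibble 0xF writes the sum into every result lane. */
static float dot4(__m128 a, __m128 b)
{
  return _mm_cvtss_f32(_mm_dp_ps(a, b, 0xFF));
}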
/* Body of _mm_min_epi8. */
return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
/* Body of _mm_max_epi8. */
return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
/* Body of _mm_min_epu16. */
return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
/* Body of _mm_max_epu16. */
return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
/* Body of _mm_min_epi32. */
return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
/* Body of _mm_max_epi32. */
return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
/* Body of _mm_min_epu32. */
return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
/* Body of _mm_max_epu32. */
return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
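The element-wise minimum and maximum forms compose directly into a clamp. A sketch for signed 32-bit lanes (the wrapper name is illustrative); the other element widths work the same way:

#include <smmintrin.h>

/* Clamp every signed 32-bit lane of v into [lo, hi]. */
static __m128i clamp_epi32(__m128i v, __m128i lo, __m128i hi)
{
  return _mm_min_epi32(_mm_max_epi32(v, lo), hi);
}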
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))

#define _mm_extract_ps(X, N) (__extension__                      \
                              ({ union { int __i; float __f; } __t; \
                                 __v4sf __a = (__v4sf)(__m128)(X);   \
                                 __t.__f = __a[(N) & 3];             \
                                 __t.__i; }))

#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
                                                     (D) = __a[N]; }))

#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))

#define _mm_insert_epi8(X, I, N) (__extension__                        \
                                  ({ __v16qi __a = (__v16qi)(__m128i)(X); \
                                     __a[(N) & 15] = (I);              \
                                     (__m128i)__a; }))

#define _mm_insert_epi32(X, I, N) (__extension__                       \
                                   ({ __v4si __a = (__v4si)(__m128i)(X); \
                                      __a[(N) & 3] = (I);              \
                                      (__m128i)__a; }))

#define _mm_insert_epi64(X, I, N) (__extension__                       \
                                   ({ __v2di __a = (__v2di)(__m128i)(X); \
                                      __a[(N) & 1] = (I);              \
                                      (__m128i)__a; }))

#define _mm_extract_epi8(X, N) (__extension__                          \
                                ({ __v16qi __a = (__v16qi)(__m128i)(X); \
                                   (int)(unsigned char) __a[(N) & 15]; }))

#define _mm_extract_epi32(X, N) (__extension__                         \
                                 ({ __v4si __a = (__v4si)(__m128i)(X); \
                                    (int)__a[(N) & 3]; }))

#define _mm_extract_epi64(X, N) (__extension__                         \
                                 ({ __v2di __a = (__v2di)(__m128i)(X); \
                                    (long long)__a[(N) & 1]; }))
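The lane index in these macros must be a compile-time constant, and out-of-range indices wrap because of the & 15 / & 3 / & 1 masking. A brief sketch using the 32-bit forms (values are illustrative):

#include <smmintrin.h>

static int insert_then_extract(void)
{
  __m128i v = _mm_set_epi32(3, 2, 1, 0);
  v = _mm_insert_epi32(v, 42, 2);    /* lane 2 becomes 42 */
  return _mm_extract_epi32(v, 2);    /* reads back 42     */
}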
/* Body of _mm_testz_si128. */
return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
/* Body of _mm_testc_si128. */
return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
/* Body of _mm_testnzc_si128. */
return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
#define _mm_test_all_ones(V)          _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
#define _mm_test_all_zeros(M, V)      _mm_testz_si128((M), (V))

/* Body of _mm_cmpeq_epi64. */
return (__m128i)((__v2di)__V1 == (__v2di)__V2);
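_mm_testz_si128 reports whether M & V is all zeros and _mm_testc_si128 whether ~M & V is all zeros, which is what the convenience macros build on. A sketch of a whole-vector zero test (the function name is illustrative):

#include <smmintrin.h>

/* Nonzero when every bit of v is zero: the all-ones mask selects every
   bit, and _mm_test_all_zeros checks that (mask & v) == 0. */
static int is_all_zero(__m128i v)
{
  return _mm_test_all_zeros(_mm_set1_epi32(-1), v);
}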
/* Body of _mm_cvtepi8_epi16. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
/* Body of _mm_cvtepi8_epi32. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
/* Body of _mm_cvtepi8_epi64. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
/* Body of _mm_cvtepi16_epi32. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
/* Body of _mm_cvtepi16_epi64. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
/* Body of _mm_cvtepi32_epi64. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
/* Body of _mm_cvtepu8_epi16. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
/* Body of _mm_cvtepu8_epi32. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
/* Body of _mm_cvtepu8_epi64. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
/* Body of _mm_cvtepu16_epi32. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
/* Body of _mm_cvtepu16_epi64. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
/* Body of _mm_cvtepu32_epi64. */
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
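Each conversion widens only as many low-order elements of the source as fit in the 128-bit result: sign-extension for the epi forms, zero-extension for the epu forms. A sketch that widens the low eight unsigned bytes before accumulating them in 16-bit lanes (names illustrative):

#include <smmintrin.h>

/* Add the low 8 bytes of src into an 8 x u16 accumulator; widening
   first avoids the wrap-around that 8-bit addition would suffer. */
static __m128i accumulate_low_bytes(__m128i acc, __m128i src)
{
  return _mm_add_epi16(acc, _mm_cvtepu8_epi16(src));
}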
/* Body of _mm_packus_epi32. */
return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
  (__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
                                      (__v16qi)(__m128i)(Y), (M)); })

/* Body of _mm_minpos_epu16. */
return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
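_mm_minpos_epu16 packs its two results into one vector: the minimum of the eight unsigned 16-bit lanes lands in lane 0 and the index of that lane in lane 1, with the remaining lanes zeroed. A sketch that unpacks both (names illustrative):

#include <smmintrin.h>

static void min_value_and_index(__m128i v, unsigned *value, unsigned *index)
{
  __m128i r = _mm_minpos_epu16(v);
  *value = (unsigned)_mm_extract_epi16(r, 0);  /* smallest lane value */
  *index = (unsigned)_mm_extract_epi16(r, 1);  /* which lane held it  */
}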
#undef __DEFAULT_FN_ATTRS
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))

/* Source data format for the string-comparison intrinsics. */
#define _SIDD_UBYTE_OPS                 0x00
#define _SIDD_UWORD_OPS                 0x01
#define _SIDD_SBYTE_OPS                 0x02
#define _SIDD_SWORD_OPS                 0x03

/* Comparison operation. */
#define _SIDD_CMP_EQUAL_ANY             0x00
#define _SIDD_CMP_RANGES                0x04
#define _SIDD_CMP_EQUAL_EACH            0x08
#define _SIDD_CMP_EQUAL_ORDERED         0x0c

/* Polarity. */
#define _SIDD_POSITIVE_POLARITY         0x00
#define _SIDD_NEGATIVE_POLARITY         0x10
#define _SIDD_MASKED_POSITIVE_POLARITY  0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY  0x30

/* Index selection for the _mm_cmpXstri forms. */
#define _SIDD_LEAST_SIGNIFICANT         0x00
#define _SIDD_MOST_SIGNIFICANT          0x40

/* Mask selection for the _mm_cmpXstrm forms. */
#define _SIDD_BIT_MASK                  0x00
#define _SIDD_UNIT_MASK                 0x40

#define _mm_cmpistrm(A, B, M) \
  (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
                                       (__v16qi)(__m128i)(B), (int)(M))

#define _mm_cmpistri(A, B, M) \
  (int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
                                   (__v16qi)(__m128i)(B), (int)(M))

#define _mm_cmpestrm(A, LA, B, LB, M) \
  (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
                                       (__v16qi)(__m128i)(B), (int)(LB), \
                                       (int)(M))

#define _mm_cmpestri(A, LA, B, LB, M) \
  (int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
                                   (__v16qi)(__m128i)(B), (int)(LB), \
                                   (int)(M))

#define _mm_cmpistra(A, B, M) \
  (int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
                                    (__v16qi)(__m128i)(B), (int)(M))

#define _mm_cmpistrc(A, B, M) \
  (int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
                                    (__v16qi)(__m128i)(B), (int)(M))

#define _mm_cmpistro(A, B, M) \
  (int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
                                    (__v16qi)(__m128i)(B), (int)(M))

#define _mm_cmpistrs(A, B, M) \
  (int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
                                    (__v16qi)(__m128i)(B), (int)(M))

#define _mm_cmpistrz(A, B, M) \
  (int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
                                    (__v16qi)(__m128i)(B), (int)(M))

#define _mm_cmpestra(A, LA, B, LB, M) \
  (int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
                                    (__v16qi)(__m128i)(B), (int)(LB), \
                                    (int)(M))

#define _mm_cmpestrc(A, LA, B, LB, M) \
  (int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
                                    (__v16qi)(__m128i)(B), (int)(LB), \
                                    (int)(M))

#define _mm_cmpestro(A, LA, B, LB, M) \
  (int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
                                    (__v16qi)(__m128i)(B), (int)(LB), \
                                    (int)(M))

#define _mm_cmpestrs(A, LA, B, LB, M) \
  (int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
                                    (__v16qi)(__m128i)(B), (int)(LB), \
                                    (int)(M))

#define _mm_cmpestrz(A, LA, B, LB, M) \
  (int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
                                    (__v16qi)(__m128i)(B), (int)(LB), \
                                    (int)(M))

/* Body of _mm_cmpgt_epi64. */
return (__m128i)((__v2di)__V1 > (__v2di)__V2);
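A control immediate for the string intrinsics is an OR of one flag from each _SIDD_ group: element type, comparison mode, polarity, and result selection. A hedged sketch that scans one 16-byte block for the first byte matching any character of a small set (the set, the block layout, and the function name are illustrative; the implicit-length cmpistri form treats both operands as NUL-terminated within the register):

#include <smmintrin.h>

/* Returns the index (0..15) of the first space, tab, or newline in the
   16-byte block, or 16 if none is found before a terminating NUL. */
static int find_whitespace(const char *block16)
{
  __m128i set   = _mm_setr_epi8(' ', '\t', '\n', 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0, 0);
  __m128i chunk = _mm_loadu_si128((const __m128i *)block16);
  return _mm_cmpistri(set, chunk,
                      _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                      _SIDD_POSITIVE_POLARITY | _SIDD_LEAST_SIGNIFICANT);
}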
/* Body of _mm_crc32_u8. */
return __builtin_ia32_crc32qi(__C, __D);
/* Body of _mm_crc32_u16. */
return __builtin_ia32_crc32hi(__C, __D);
/* Body of _mm_crc32_u32. */
return __builtin_ia32_crc32si(__C, __D);

/* 64-bit form, only available on x86-64 targets. */
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
{
  return __builtin_ia32_crc32di(__C, __D);
}
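Each CRC step folds its second operand into the running CRC-32C value held in the first operand and returns the update, so checksumming a buffer is a simple loop. A sketch over bytes (the function name, initial value, and final XOR are conventions of common CRC-32C uses, not something the intrinsics impose); _mm_crc32_u32 and _mm_crc32_u64 work the same way on wider chunks:

#include <smmintrin.h>
#include <stddef.h>

static unsigned int crc32c_bytes(const unsigned char *p, size_t n)
{
  unsigned int crc = 0xFFFFFFFFu;          /* customary initial value    */
  for (size_t i = 0; i < n; ++i)
    crc = _mm_crc32_u8(crc, p[i]);         /* fold in one byte at a time */
  return crc ^ 0xFFFFFFFFu;                /* customary final inversion  */
}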
#undef __DEFAULT_FN_ATTRS

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V)
Sign-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] t...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
Compares each of the corresponding 64-bit values of the 128-bit integer vectors for equality...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1, __m128i __V2)
Converts 32-bit signed integers from both 128-bit integer vector operands into 16-bit unsigned intege...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_stream_load_si128(__m128i const *__V)
Loads integer values from a 128-bit aligned memory location to a 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V)
Zero-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] t...
static __inline__ unsigned int __DEFAULT_FN_ATTRS _mm_crc32_u16(unsigned int __C, unsigned short __D)
Adds the unsigned integer operand to the CRC-32C checksum of the unsigned short operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1, __m128d __V2, __m128d __M)
Returns a 128-bit vector of [2 x double] where the values are selected from either the first or secon...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [16 x i8] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V)
Sign-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] t...
static __inline__ unsigned int __DEFAULT_FN_ATTRS _mm_crc32_u32(unsigned int __C, unsigned int __D)
Adds the first unsigned integer operand to the CRC-32C checksum of the second unsigned integer operan...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding elements of two 128-bit vectors of [4 x i32] and returns the lower 32 bits of...
static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M, __m128i __V)
Tests whether the specified bits in a 128-bit integer vector are all zeros.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1, __m128i __V2, __m128i __M)
Returns a 128-bit vector of [16 x i8] where the values are selected from either of the first or secon...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
Compares each of the corresponding 64-bit values of the 128-bit integer vectors to determine if the v...
static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M, __m128i __V)
Tests whether the specified bits in a 128-bit integer vector are neither all zeros nor all ones...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V)
Zero-extends each of the lower eight 8-bit integer elements of a 128-bit vector of [16 x i8] to 16-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [8 x u16] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [8 x u16] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding even-indexed elements of two 128-bit vectors of [4 x i32] and returns a 128-...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V)
Zero-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1, __m128 __V2, __m128 __M)
Returns a 128-bit vector of [4 x float] where the values are selected from either the first or second...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [16 x i8] and returns a 128-bit vector ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V)
Zero-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] ...
static __inline__ unsigned int __DEFAULT_FN_ATTRS _mm_crc32_u8(unsigned int __C, unsigned char __D)
Adds the unsigned integer operand to the CRC-32C checksum of the unsigned char operand.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V)
Sign-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V)
Finds the minimum unsigned 16-bit element in the input 128-bit vector of [8 x u16] and returns it and...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector ...
#define __DEFAULT_FN_ATTRS
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V)
Sign-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V)
Sign-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V)
Zero-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] t...
static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M, __m128i __V)
Tests whether the specified bits in a 128-bit integer vector are all ones.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V)
Zero-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V)
Sign-extends each of the lower eight 8-bit integer elements of a 128-bit vector of [16 x i8] to 16-bi...