#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
#ifndef cl_khr_depth_images
#define cl_khr_depth_images
#endif //cl_khr_depth_images
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
#ifdef cl_khr_3d_image_writes
#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
#endif //cl_khr_3d_image_writes
#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0

#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
#ifndef cl_intel_planar_yuv
#define cl_intel_planar_yuv
#endif // cl_intel_planar_yuv
#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2

#define __ovld __attribute__((overloadable))
#define __conv __attribute__((convergent))
#define __purefn __attribute__((pure))
#define __cnfn __attribute__((const))

#pragma OPENCL EXTENSION cl_khr_fp16 : enable

#if __OPENCL_C_VERSION__ < CL_VERSION_1_2
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
typedef double double16 __attribute__((ext_vector_type(16)));
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
#define NULL ((void*)0)
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

#define MAXFLOAT 0x1.fffffep127f
#define HUGE_VALF (__builtin_huge_valf())
#define HUGE_VAL (__builtin_huge_val())
#define INFINITY (__builtin_inff())
#define NAN as_float(INT_MAX)

#define FP_ILOGB0 INT_MIN
#define FP_ILOGBNAN INT_MAX

#define FLT_MANT_DIG 24
#define FLT_MAX_10_EXP +38
#define FLT_MAX_EXP +128
#define FLT_MIN_10_EXP -37
#define FLT_MIN_EXP -125
#define FLT_MAX 0x1.fffffep127f
#define FLT_MIN 0x1.0p-126f
#define FLT_EPSILON 0x1.0p-23f

#define M_E_F 2.71828182845904523536028747135266250f
#define M_LOG2E_F 1.44269504088896340735992468100189214f
#define M_LOG10E_F 0.434294481903251827651128918916605082f
#define M_LN2_F 0.693147180559945309417232121458176568f
#define M_LN10_F 2.30258509299404568401799145468436421f
#define M_PI_F 3.14159265358979323846264338327950288f
#define M_PI_2_F 1.57079632679489661923132169163975144f
#define M_PI_4_F 0.785398163397448309615660845819875721f
#define M_1_PI_F 0.318309886183790671537767526745028724f
#define M_2_PI_F 0.636619772367581343075535053490057448f
#define M_2_SQRTPI_F 1.12837916709551257389615890312154517f
#define M_SQRT2_F 1.41421356237309504880168872420969808f
#define M_SQRT1_2_F 0.707106781186547524400844362104849039f

#define DBL_MANT_DIG 53
#define DBL_MAX_10_EXP +308
#define DBL_MAX_EXP +1024
#define DBL_MIN_10_EXP -307
#define DBL_MIN_EXP -1021
#define DBL_MAX 0x1.fffffffffffffp1023
#define DBL_MIN 0x1.0p-1022
#define DBL_EPSILON 0x1.0p-52

#define M_E 0x1.5bf0a8b145769p+1
#define M_LOG2E 0x1.71547652b82fep+0
#define M_LOG10E 0x1.bcb7b1526e50ep-2
#define M_LN2 0x1.62e42fefa39efp-1
#define M_LN10 0x1.26bb1bbb55516p+1
#define M_PI 0x1.921fb54442d18p+1
#define M_PI_2 0x1.921fb54442d18p+0
#define M_PI_4 0x1.921fb54442d18p-1
#define M_1_PI 0x1.45f306dc9c883p-2
#define M_2_PI 0x1.45f306dc9c883p-1
#define M_2_SQRTPI 0x1.20dd750429b6dp+0
#define M_SQRT2 0x1.6a09e667f3bcdp+0
#define M_SQRT1_2 0x1.6a09e667f3bcdp-1

#define HALF_MANT_DIG 11
#define HALF_MAX_10_EXP +4
#define HALF_MAX_EXP +16
#define HALF_MIN_10_EXP -4
#define HALF_MIN_EXP -13
#define HALF_MAX ((0x1.ffcp15h))
#define HALF_MIN ((0x1.0p-14h))
#define HALF_EPSILON ((0x1.0p-10h))

#define M_E_H 2.71828182845904523536028747135266250h
#define M_LOG2E_H 1.44269504088896340735992468100189214h
#define M_LOG10E_H 0.434294481903251827651128918916605082h
#define M_LN2_H 0.693147180559945309417232121458176568h
#define M_LN10_H 2.30258509299404568401799145468436421h
#define M_PI_H 3.14159265358979323846264338327950288h
#define M_PI_2_H 1.57079632679489661923132169163975144h
#define M_PI_4_H 0.785398163397448309615660845819875721h
#define M_1_PI_H 0.318309886183790671537767526745028724h
#define M_2_PI_H 0.636619772367581343075535053490057448h
#define M_2_SQRTPI_H 1.12837916709551257389615890312154517h
#define M_SQRT2_H 1.41421356237309504880168872420969808h
#define M_SQRT1_2_H 0.707106781186547524400844362104849039h

#define SCHAR_MAX 127
#define SCHAR_MIN (-128)
#define UCHAR_MAX 255
#define CHAR_MAX SCHAR_MAX
#define CHAR_MIN SCHAR_MIN
#define USHRT_MAX 65535
#define SHRT_MAX 32767
#define SHRT_MIN (-32768)
#define UINT_MAX 0xffffffff
#define INT_MAX 2147483647
#define INT_MIN (-2147483647-1)
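// Illustrative usage sketch added by the editor (not part of the original header):
// the limit macros above follow the usual <float.h>/<limits.h> conventions, e.g. for
// a relative floating-point comparison. The helper name below is hypothetical.
//
//   int nearly_equal(float a, float b) {
//     return fabs(a - b) <= FLT_EPSILON * fmax(fabs(a), fabs(b));
//   }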
#define ULONG_MAX 0xffffffffffffffffUL
#define LONG_MAX 0x7fffffffffffffffL
#define LONG_MIN (-0x7fffffffffffffffL-1)

/* ... explicit conversion (convert_*) built-in declarations elided in this excerpt ... */

#define as_char(x) __builtin_astype((x), char)
#define as_char2(x) __builtin_astype((x), char2)
#define as_char3(x) __builtin_astype((x), char3)
#define as_char4(x) __builtin_astype((x), char4)
#define as_char8(x) __builtin_astype((x), char8)
#define as_char16(x) __builtin_astype((x), char16)

#define as_uchar(x) __builtin_astype((x), uchar)
#define as_uchar2(x) __builtin_astype((x), uchar2)
#define as_uchar3(x) __builtin_astype((x), uchar3)
#define as_uchar4(x) __builtin_astype((x), uchar4)
#define as_uchar8(x) __builtin_astype((x), uchar8)
#define as_uchar16(x) __builtin_astype((x), uchar16)

#define as_short(x) __builtin_astype((x), short)
#define as_short2(x) __builtin_astype((x), short2)
#define as_short3(x) __builtin_astype((x), short3)
#define as_short4(x) __builtin_astype((x), short4)
#define as_short8(x) __builtin_astype((x), short8)
#define as_short16(x) __builtin_astype((x), short16)

#define as_ushort(x) __builtin_astype((x), ushort)
#define as_ushort2(x) __builtin_astype((x), ushort2)
#define as_ushort3(x) __builtin_astype((x), ushort3)
#define as_ushort4(x) __builtin_astype((x), ushort4)
#define as_ushort8(x) __builtin_astype((x), ushort8)
#define as_ushort16(x) __builtin_astype((x), ushort16)

#define as_int(x) __builtin_astype((x), int)
#define as_int2(x) __builtin_astype((x), int2)
#define as_int3(x) __builtin_astype((x), int3)
#define as_int4(x) __builtin_astype((x), int4)
#define as_int8(x) __builtin_astype((x), int8)
#define as_int16(x) __builtin_astype((x), int16)

#define as_uint(x) __builtin_astype((x), uint)
#define as_uint2(x) __builtin_astype((x), uint2)
#define as_uint3(x) __builtin_astype((x), uint3)
#define as_uint4(x) __builtin_astype((x), uint4)
#define as_uint8(x) __builtin_astype((x), uint8)
#define as_uint16(x) __builtin_astype((x), uint16)

#define as_long(x) __builtin_astype((x), long)
#define as_long2(x) __builtin_astype((x), long2)
#define as_long3(x) __builtin_astype((x), long3)
#define as_long4(x) __builtin_astype((x), long4)
#define as_long8(x) __builtin_astype((x), long8)
#define as_long16(x) __builtin_astype((x), long16)

#define as_ulong(x) __builtin_astype((x), ulong)
#define as_ulong2(x) __builtin_astype((x), ulong2)
#define as_ulong3(x) __builtin_astype((x), ulong3)
#define as_ulong4(x) __builtin_astype((x), ulong4)
#define as_ulong8(x) __builtin_astype((x), ulong8)
#define as_ulong16(x) __builtin_astype((x), ulong16)

#define as_float(x) __builtin_astype((x), float)
#define as_float2(x) __builtin_astype((x), float2)
#define as_float3(x) __builtin_astype((x), float3)
#define as_float4(x) __builtin_astype((x), float4)
#define as_float8(x) __builtin_astype((x), float8)
#define as_float16(x) __builtin_astype((x), float16)

#ifdef cl_khr_fp64
#define as_double(x) __builtin_astype((x), double)
#define as_double2(x) __builtin_astype((x), double2)
#define as_double3(x) __builtin_astype((x), double3)
#define as_double4(x) __builtin_astype((x), double4)
#define as_double8(x) __builtin_astype((x), double8)
#define as_double16(x) __builtin_astype((x), double16)
#endif //cl_khr_fp64
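// Illustrative usage sketch added by the editor (not part of the original header):
// the as_<type> macros reinterpret the bit pattern of a value as another type of the
// same size via __builtin_astype; no value conversion is performed. Helper names
// below are hypothetical.
//
//   uint  float_bits(float x) { return as_uint(x); }   // raw IEEE-754 bits
//   float bits_float(uint u)  { return as_float(u); }  // inverse reinterpretation
//   // e.g. NAN above is defined as as_float(INT_MAX), i.e. the bit pattern 0x7fffffff.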
#ifdef cl_khr_fp16
#define as_half(x) __builtin_astype((x), half)
#define as_half2(x) __builtin_astype((x), half2)
#define as_half3(x) __builtin_astype((x), half3)
#define as_half4(x) __builtin_astype((x), half4)
#define as_half8(x) __builtin_astype((x), half8)
#define as_half16(x) __builtin_astype((x), half16)
#endif //cl_khr_fp16

#define __kernel_exec(X, typen) __kernel \
	__attribute__((work_group_size_hint(X, 1, 1))) \
	__attribute__((vec_type_hint(typen)))

#define kernel_exec(X, typen) __kernel \
	__attribute__((work_group_size_hint(X, 1, 1))) \
	__attribute__((vec_type_hint(typen)))

/* ... work-item built-ins and the math built-ins preceding fract are elided in this excerpt ... */
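// Illustrative usage sketch added by the editor (not part of the original header):
// kernel_exec(X, typen) expands to __kernel plus the work_group_size_hint(X, 1, 1)
// and vec_type_hint(typen) attributes. The kernel below is hypothetical.
//
//   kernel_exec(64, float4) void scale(__global float4 *buf, float s) {
//     size_t i = get_global_id(0);
//     buf[i] *= s;
//   }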
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
/* generic address space overloads (partially elided in this excerpt) */
double16 __ovld fract(double16 x, double16 *iptr);
#endif //cl_khr_fp64
#endif //cl_khr_fp16
float __ovld fract(float x, __global float *iptr);
float2 __ovld fract(float2 x, __global float2 *iptr);
float3 __ovld fract(float3 x, __global float3 *iptr);
float4 __ovld fract(float4 x, __global float4 *iptr);
float8 __ovld fract(float8 x, __global float8 *iptr);
float16 __ovld fract(float16 x, __global float16 *iptr);
float2 __ovld fract(float2 x, __local float2 *iptr);
float3 __ovld fract(float3 x, __local float3 *iptr);
float4 __ovld fract(float4 x, __local float4 *iptr);
float8 __ovld fract(float8 x, __local float8 *iptr);
float16 __ovld fract(float16 x, __local float16 *iptr);
float __ovld fract(float x, __private float *iptr);
float2 __ovld fract(float2 x, __private float2 *iptr);
float3 __ovld fract(float3 x, __private float3 *iptr);
float4 __ovld fract(float4 x, __private float4 *iptr);
float8 __ovld fract(float8 x, __private float8 *iptr);
float16 __ovld fract(float16 x, __private float16 *iptr);
#ifdef cl_khr_fp64
double __ovld fract(double x, __global double *iptr);
double2 __ovld fract(double2 x, __global double2 *iptr);
double3 __ovld fract(double3 x, __global double3 *iptr);
double4 __ovld fract(double4 x, __global double4 *iptr);
double8 __ovld fract(double8 x, __global double8 *iptr);
double16 __ovld fract(double16 x, __global double16 *iptr);
double __ovld fract(double x, __local double *iptr);
double2 __ovld fract(double2 x, __local double2 *iptr);
double3 __ovld fract(double3 x, __local double3 *iptr);
double4 __ovld fract(double4 x, __local double4 *iptr);
double8 __ovld fract(double8 x, __local double8 *iptr);
double16 __ovld fract(double16 x, __local double16 *iptr);
double __ovld fract(double x, __private double *iptr);
double2 __ovld fract(double2 x, __private double2 *iptr);
double3 __ovld fract(double3 x, __private double3 *iptr);
double4 __ovld fract(double4 x, __private double4 *iptr);
double8 __ovld fract(double8 x, __private double8 *iptr);
double16 __ovld fract(double16 x, __private double16 *iptr);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
half2 __ovld fract(half2 x, __global half2 *iptr);
half3 __ovld fract(half3 x, __global half3 *iptr);
half4 __ovld fract(half4 x, __global half4 *iptr);
half8 __ovld fract(half8 x, __global half8 *iptr);
half16 __ovld fract(half16 x, __global half16 *iptr);
half16 __ovld fract(half16 x, __local half16 *iptr);
half2 __ovld fract(half2 x, __private half2 *iptr);
half3 __ovld fract(half3 x, __private half3 *iptr);
half4 __ovld fract(half4 x, __private half4 *iptr);
half8 __ovld fract(half8 x, __private half8 *iptr);
half16 __ovld fract(half16 x, __private half16 *iptr);
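// Illustrative usage sketch added by the editor (not part of the original header):
// fract returns the fractional part of x (clamped just below 1.0) and writes floor(x)
// through iptr; the overloads above differ only in the address space of iptr.
// Kernel and variable names below are hypothetical.
//
//   __kernel void split_fract(__global float *x, __global float *ipart, __global float *fpart) {
//     size_t i = get_global_id(0);
//     float fl;                       // __private temporary
//     fpart[i] = fract(x[i], &fl);    // fractional part
//     ipart[i] = fl;                  // floor(x[i])
//   }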
#endif //cl_khr_fp16
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

/* ... math built-in declarations between fract and modf elided in this excerpt ... */
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
/* generic address space overloads (partially elided in this excerpt) */
float16 __ovld modf(float16 x, float16 *iptr);
#ifdef cl_khr_fp64
double2 __ovld modf(double2 x, double2 *iptr);
double3 __ovld modf(double3 x, double3 *iptr);
double4 __ovld modf(double4 x, double4 *iptr);
double8 __ovld modf(double8 x, double8 *iptr);
double16 __ovld modf(double16 x, double16 *iptr);
#endif //cl_khr_fp64
#endif //cl_khr_fp16
float __ovld modf(float x, __global float *iptr);
float2 __ovld modf(float2 x, __global float2 *iptr);
float3 __ovld modf(float3 x, __global float3 *iptr);
float4 __ovld modf(float4 x, __global float4 *iptr);
float8 __ovld modf(float8 x, __global float8 *iptr);
float16 __ovld modf(float16 x, __global float16 *iptr);
float __ovld modf(float x, __local float *iptr);
float2 __ovld modf(float2 x, __local float2 *iptr);
float3 __ovld modf(float3 x, __local float3 *iptr);
float4 __ovld modf(float4 x, __local float4 *iptr);
float8 __ovld modf(float8 x, __local float8 *iptr);
float16 __ovld modf(float16 x, __local float16 *iptr);
float __ovld modf(float x, __private float *iptr);
float2 __ovld modf(float2 x, __private float2 *iptr);
float3 __ovld modf(float3 x, __private float3 *iptr);
float4 __ovld modf(float4 x, __private float4 *iptr);
float8 __ovld modf(float8 x, __private float8 *iptr);
float16 __ovld modf(float16 x, __private float16 *iptr);
#ifdef cl_khr_fp64
double __ovld modf(double x, __global double *iptr);
double2 __ovld modf(double2 x, __global double2 *iptr);
double3 __ovld modf(double3 x, __global double3 *iptr);
double4 __ovld modf(double4 x, __global double4 *iptr);
double8 __ovld modf(double8 x, __global double8 *iptr);
double16 __ovld modf(double16 x, __global double16 *iptr);
double __ovld modf(double x, __local double *iptr);
double2 __ovld modf(double2 x, __local double2 *iptr);
double3 __ovld modf(double3 x, __local double3 *iptr);
double4 __ovld modf(double4 x, __local double4 *iptr);
double8 __ovld modf(double8 x, __local double8 *iptr);
double16 __ovld modf(double16 x, __local double16 *iptr);
double __ovld modf(double x, __private double *iptr);
double2 __ovld modf(double2 x, __private double2 *iptr);
double3 __ovld modf(double3 x, __private double3 *iptr);
double4 __ovld modf(double4 x, __private double4 *iptr);
double8 __ovld modf(double8 x, __private double8 *iptr);
double16 __ovld modf(double16 x, __private double16 *iptr);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
half __ovld modf(half x, __global half *iptr);
half2 __ovld modf(half2 x, __global half2 *iptr);
half3 __ovld modf(half3 x, __global half3 *iptr);
half4 __ovld modf(half4 x, __global half4 *iptr);
half8 __ovld modf(half8 x, __global half8 *iptr);
half16 __ovld modf(half16 x, __global half16 *iptr);
half __ovld modf(half x, __local half *iptr);
half2 __ovld modf(half2 x, __local half2 *iptr);
half3 __ovld modf(half3 x, __local half3 *iptr);
half4 __ovld modf(half4 x, __local half4 *iptr);
half8 __ovld modf(half8 x, __local half8 *iptr);
half16 __ovld modf(half16 x, __local half16 *iptr);
half __ovld modf(half x, __private half *iptr);
half2 __ovld modf(half2 x, __private half2 *iptr);
half3 __ovld modf(half3 x, __private half3 *iptr);
half4 __ovld modf(half4 x, __private half4 *iptr);
half8 __ovld modf(half8 x, __private half8 *iptr);
half16 __ovld modf(half16 x, __private half16 *iptr);
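// Illustrative usage sketch added by the editor (not part of the original header):
// modf decomposes x into integral and fractional parts, each carrying the sign of x;
// the integral part is written through iptr. The helper name below is hypothetical.
//
//   float frac_of(float x, __private float *whole) {
//     return modf(x, whole);   // e.g. modf(-3.25f, w) returns -0.25f and sets *w to -3.0f
//   }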
#endif //cl_khr_fp16
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

/* ... math built-in declarations between modf and remquo elided in this excerpt ... */
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
/* generic address space overloads (partially elided in this excerpt) */
float16 __ovld remquo(float16 x, float16 y, int16 *quo);
#ifdef cl_khr_fp64
double2 __ovld remquo(double2 x, double2 y, int2 *quo);
double3 __ovld remquo(double3 x, double3 y, int3 *quo);
double4 __ovld remquo(double4 x, double4 y, int4 *quo);
double8 __ovld remquo(double8 x, double8 y, int8 *quo);
double16 __ovld remquo(double16 x, double16 y, int16 *quo);
#endif //cl_khr_fp64
#endif //cl_khr_fp16
float __ovld remquo(float x, float y, __global int *quo);
float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
float __ovld remquo(float x, float y, __local int *quo);
float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
float __ovld remquo(float x, float y, __private int *quo);
float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
#ifdef cl_khr_fp64
double __ovld remquo(double x, double y, __global int *quo);
double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
double __ovld remquo(double x, double y, __local int *quo);
double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
double __ovld remquo(double x, double y, __private int *quo);
double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
half __ovld remquo(half x, half y, __private int *quo);
half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
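// Illustrative usage sketch added by the editor (not part of the original header):
// remquo returns the same value as remainder(x, y) and also writes the sign and the
// low seven bits of the integral quotient x/y through quo. Names below are hypothetical.
//
//   __kernel void wrap_angle(__global float *a, __global int *octant) {
//     size_t i = get_global_id(0);
//     int q;
//     a[i] = remquo(a[i], M_PI_4_F, &q);  // reduce to [-pi/8, pi/8], quotient bits in q
//     octant[i] = q & 7;
//   }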
#endif //cl_khr_fp16
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

/* ... math built-in declarations between remquo and sincos elided in this excerpt ... */

#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
/* ... sincos overloads for the generic address space elided ... */
#endif //cl_khr_fp64
#endif //cl_khr_fp16
float2 __ovld sincos(float2 x, __global float2 *cosval);
float3 __ovld sincos(float3 x, __global float3 *cosval);
float4 __ovld sincos(float4 x, __global float4 *cosval);
float8 __ovld sincos(float8 x, __global float8 *cosval);
float16 __ovld sincos(float16 x, __global float16 *cosval);
float2 __ovld sincos(float2 x, __local float2 *cosval);
float3 __ovld sincos(float3 x, __local float3 *cosval);
float4 __ovld sincos(float4 x, __local float4 *cosval);
float8 __ovld sincos(float8 x, __local float8 *cosval);
float16 __ovld sincos(float16 x, __local float16 *cosval);
float2 __ovld sincos(float2 x, __private float2 *cosval);
float3 __ovld sincos(float3 x, __private float3 *cosval);
float4 __ovld sincos(float4 x, __private float4 *cosval);
float8 __ovld sincos(float8 x, __private float8 *cosval);
float16 __ovld sincos(float16 x, __private float16 *cosval);
#ifdef cl_khr_fp64
double __ovld sincos(double x, __global double *cosval);
double2 __ovld sincos(double2 x, __global double2 *cosval);
double3 __ovld sincos(double3 x, __global double3 *cosval);
double4 __ovld sincos(double4 x, __global double4 *cosval);
double8 __ovld sincos(double8 x, __global double8 *cosval);
double16 __ovld sincos(double16 x, __global double16 *cosval);
double __ovld sincos(double x, __local double *cosval);
double2 __ovld sincos(double2 x, __local double2 *cosval);
double3 __ovld sincos(double3 x, __local double3 *cosval);
double4 __ovld sincos(double4 x, __local double4 *cosval);
double8 __ovld sincos(double8 x, __local double8 *cosval);
double16 __ovld sincos(double16 x, __local double16 *cosval);
double __ovld sincos(double x, __private double *cosval);
double2 __ovld sincos(double2 x, __private double2 *cosval);
double3 __ovld sincos(double3 x, __private double3 *cosval);
double4 __ovld sincos(double4 x, __private double4 *cosval);
double8 __ovld sincos(double8 x, __private double8 *cosval);
double16 __ovld sincos(double16 x, __private double16 *cosval);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
half16 __ovld sincos(half16 x, __global half16 *cosval);
half16 __ovld sincos(half16 x, __local half16 *cosval);
half16 __ovld sincos(half16 x, __private half16 *cosval);
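// Illustrative usage sketch added by the editor (not part of the original header):
// sincos(x, cosval) returns sin(x) and writes cos(x) through cosval, which is often
// cheaper than separate sin and cos calls. Names below are hypothetical.
//
//   __kernel void rotate2d(__global float2 *v, float angle) {
//     size_t i = get_global_id(0);
//     float c;
//     float s = sincos(angle, &c);    // s = sin(angle), c = cos(angle)
//     v[i] = (float2)(c * v[i].x - s * v[i].y,
//                     s * v[i].x + c * v[i].y);
//   }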
#endif //cl_khr_fp16
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

/* ... the remaining math built-ins and the integer, common, geometric and relational
   built-in declarations are elided in this excerpt ... */
char2 __ovld vload2(size_t offset, const __constant char *p);
short2 __ovld vload2(size_t offset, const __constant short *p);
int2 __ovld vload2(size_t offset, const __constant int *p);
long2 __ovld vload2(size_t offset, const __constant long *p);
float2 __ovld vload2(size_t offset, const __constant float *p);
char3 __ovld vload3(size_t offset, const __constant char *p);
short3 __ovld vload3(size_t offset, const __constant short *p);
int3 __ovld vload3(size_t offset, const __constant int *p);
long3 __ovld vload3(size_t offset, const __constant long *p);
float3 __ovld vload3(size_t offset, const __constant float *p);
char4 __ovld vload4(size_t offset, const __constant char *p);
short4 __ovld vload4(size_t offset, const __constant short *p);
int4 __ovld vload4(size_t offset, const __constant int *p);
long4 __ovld vload4(size_t offset, const __constant long *p);
float4 __ovld vload4(size_t offset, const __constant float *p);
char8 __ovld vload8(size_t offset, const __constant char *p);
short8 __ovld vload8(size_t offset, const __constant short *p);
int8 __ovld vload8(size_t offset, const __constant int *p);
long8 __ovld vload8(size_t offset, const __constant long *p);
float8 __ovld vload8(size_t offset, const __constant float *p);
char16 __ovld vload16(size_t offset, const __constant char *p);
short16 __ovld vload16(size_t offset, const __constant short *p);
int16 __ovld vload16(size_t offset, const __constant int *p);
long16 __ovld vload16(size_t offset, const __constant long *p);
float16 __ovld vload16(size_t offset, const __constant float *p);
#ifdef cl_khr_fp64
double2 __ovld vload2(size_t offset, const __constant double *p);
double3 __ovld vload3(size_t offset, const __constant double *p);
double4 __ovld vload4(size_t offset, const __constant double *p);
double8 __ovld vload8(size_t offset, const __constant double *p);
double16 __ovld vload16(size_t offset, const __constant double *p);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
half __ovld vload(size_t offset, const __constant half *p);
half2 __ovld vload2(size_t offset, const __constant half *p);
half3 __ovld vload3(size_t offset, const __constant half *p);
half4 __ovld vload4(size_t offset, const __constant half *p);
half8 __ovld vload8(size_t offset, const __constant half *p);
half16 __ovld vload16(size_t offset, const __constant half *p);
#endif //cl_khr_fp16

#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
/* generic address space overloads (partially elided in this excerpt) */
#ifdef cl_khr_fp64
double2 __ovld vload2(size_t offset, const double *p);
double3 __ovld vload3(size_t offset, const double *p);
double4 __ovld vload4(size_t offset, const double *p);
double8 __ovld vload8(size_t offset, const double *p);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
half __ovld vload(size_t offset, const half *p);
#endif //cl_khr_fp16
char2 __ovld vload2(size_t offset, const __global char *p);
short2 __ovld vload2(size_t offset, const __global short *p);
int2 __ovld vload2(size_t offset, const __global int *p);
long2 __ovld vload2(size_t offset, const __global long *p);
float2 __ovld vload2(size_t offset, const __global float *p);
char3 __ovld vload3(size_t offset, const __global char *p);
short3 __ovld vload3(size_t offset, const __global short *p);
int3 __ovld vload3(size_t offset, const __global int *p);
long3 __ovld vload3(size_t offset, const __global long *p);
float3 __ovld vload3(size_t offset, const __global float *p);
char4 __ovld vload4(size_t offset, const __global char *p);
short4 __ovld vload4(size_t offset, const __global short *p);
int4 __ovld vload4(size_t offset, const __global int *p);
long4 __ovld vload4(size_t offset, const __global long *p);
float4 __ovld vload4(size_t offset, const __global float *p);
char8 __ovld vload8(size_t offset, const __global char *p);
short8 __ovld vload8(size_t offset, const __global short *p);
int8 __ovld vload8(size_t offset, const __global int *p);
long8 __ovld vload8(size_t offset, const __global long *p);
float8 __ovld vload8(size_t offset, const __global float *p);
char16 __ovld vload16(size_t offset, const __global char *p);
short16 __ovld vload16(size_t offset, const __global short *p);
int16 __ovld vload16(size_t offset, const __global int *p);
long16 __ovld vload16(size_t offset, const __global long *p);
float16 __ovld vload16(size_t offset, const __global float *p);
char2 __ovld vload2(size_t offset, const __local char *p);
short2 __ovld vload2(size_t offset, const __local short *p);
int2 __ovld vload2(size_t offset, const __local int *p);
long2 __ovld vload2(size_t offset, const __local long *p);
float2 __ovld vload2(size_t offset, const __local float *p);
char3 __ovld vload3(size_t offset, const __local char *p);
short3 __ovld vload3(size_t offset, const __local short *p);
int3 __ovld vload3(size_t offset, const __local int *p);
long3 __ovld vload3(size_t offset, const __local long *p);
float3 __ovld vload3(size_t offset, const __local float *p);
char4 __ovld vload4(size_t offset, const __local char *p);
short4 __ovld vload4(size_t offset, const __local short *p);
int4 __ovld vload4(size_t offset, const __local int *p);
long4 __ovld vload4(size_t offset, const __local long *p);
float4 __ovld vload4(size_t offset, const __local float *p);
char8 __ovld vload8(size_t offset, const __local char *p);
short8 __ovld vload8(size_t offset, const __local short *p);
int8 __ovld vload8(size_t offset, const __local int *p);
long8 __ovld vload8(size_t offset, const __local long *p);
float8 __ovld vload8(size_t offset, const __local float *p);
char16 __ovld vload16(size_t offset, const __local char *p);
short16 __ovld vload16(size_t offset, const __local short *p);
long16 __ovld vload16(size_t offset, const __local long *p);
float16 __ovld vload16(size_t offset, const __local float *p);
char2 __ovld vload2(size_t offset, const __private char *p);
short2 __ovld vload2(size_t offset, const __private short *p);
int2 __ovld vload2(size_t offset, const __private int *p);
long2 __ovld vload2(size_t offset, const __private long *p);
float2 __ovld vload2(size_t offset, const __private float *p);
char3 __ovld vload3(size_t offset, const __private char *p);
short3 __ovld vload3(size_t offset, const __private short *p);
int3 __ovld vload3(size_t offset, const __private int *p);
long3 __ovld vload3(size_t offset, const __private long *p);
float3 __ovld vload3(size_t offset, const __private float *p);
char4 __ovld vload4(size_t offset, const __private char *p);
short4 __ovld vload4(size_t offset, const __private short *p);
int4 __ovld vload4(size_t offset, const __private int *p);
long4 __ovld vload4(size_t offset, const __private long *p);
float4 __ovld vload4(size_t offset, const __private float *p);
char8 __ovld vload8(size_t offset, const __private char *p);
short8 __ovld vload8(size_t offset, const __private short *p);
int8 __ovld vload8(size_t offset, const __private int *p);
long8 __ovld vload8(size_t offset, const __private long *p);
float8 __ovld vload8(size_t offset, const __private float *p);
char16 __ovld vload16(size_t offset, const __private char *p);
short16 __ovld vload16(size_t offset, const __private short *p);
int16 __ovld vload16(size_t offset, const __private int *p);
long16 __ovld vload16(size_t offset, const __private long *p);
float16 __ovld vload16(size_t offset, const __private float *p);
#ifdef cl_khr_fp64
double2 __ovld vload2(size_t offset, const __global double *p);
double3 __ovld vload3(size_t offset, const __global double *p);
double4 __ovld vload4(size_t offset, const __global double *p);
double8 __ovld vload8(size_t offset, const __global double *p);
double16 __ovld vload16(size_t offset, const __global double *p);
double2 __ovld vload2(size_t offset, const __local double *p);
double3 __ovld vload3(size_t offset, const __local double *p);
double4 __ovld vload4(size_t offset, const __local double *p);
double8 __ovld vload8(size_t offset, const __local double *p);
double16 __ovld vload16(size_t offset, const __local double *p);
double2 __ovld vload2(size_t offset, const __private double *p);
double3 __ovld vload3(size_t offset, const __private double *p);
double4 __ovld vload4(size_t offset, const __private double *p);
double8 __ovld vload8(size_t offset, const __private double *p);
double16 __ovld vload16(size_t offset, const __private double *p);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
half __ovld vload(size_t offset, const __global half *p);
half2 __ovld vload2(size_t offset, const __global half *p);
half3 __ovld vload3(size_t offset, const __global half *p);
half4 __ovld vload4(size_t offset, const __global half *p);
half8 __ovld vload8(size_t offset, const __global half *p);
half16 __ovld vload16(size_t offset, const __global half *p);
half __ovld vload(size_t offset, const __local half *p);
half2 __ovld vload2(size_t offset, const __local half *p);
half3 __ovld vload3(size_t offset, const __local half *p);
half4 __ovld vload4(size_t offset, const __local half *p);
half8 __ovld vload8(size_t offset, const __local half *p);
half16 __ovld vload16(size_t offset, const __local half *p);
half __ovld vload(size_t offset, const __private half *p);
half2 __ovld vload2(size_t offset, const __private half *p);
half3 __ovld vload3(size_t offset, const __private half *p);
half4 __ovld vload4(size_t offset, const __private half *p);
half8 __ovld vload8(size_t offset, const __private half *p);
half16 __ovld vload16(size_t offset, const __private half *p);
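// Illustrative usage sketch added by the editor (not part of the original header):
// vloadn(offset, p) reads n consecutive elements starting at p + offset * n, which
// allows vector loads from pointers that are only element-aligned. Names below are
// hypothetical.
//
//   __kernel void sum4(__global const float *in, __global float *out) {
//     size_t i = get_global_id(0);
//     float4 v = vload4(i, in);       // reads in[4*i .. 4*i+3]
//     out[i] = v.x + v.y + v.z + v.w;
//   }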
#endif //cl_khr_fp16
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
/* generic address space overloads (partially elided in this excerpt) */
#ifdef cl_khr_fp64
void __ovld vstore2(double2 data, size_t offset, double *p);
void __ovld vstore3(double3 data, size_t offset, double *p);
void __ovld vstore4(double4 data, size_t offset, double *p);
void __ovld vstore8(double8 data, size_t offset, double *p);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
void __ovld vstore(half data, size_t offset, half *p);
#endif //cl_khr_fp16
void __ovld vstore2(char2 data, size_t offset, __global char *p);
void __ovld vstore2(short2 data, size_t offset, __global short *p);
void __ovld vstore2(int2 data, size_t offset, __global int *p);
void __ovld vstore2(long2 data, size_t offset, __global long *p);
void __ovld vstore2(float2 data, size_t offset, __global float *p);
void __ovld vstore3(char3 data, size_t offset, __global char *p);
void __ovld vstore3(short3 data, size_t offset, __global short *p);
void __ovld vstore3(int3 data, size_t offset, __global int *p);
void __ovld vstore3(long3 data, size_t offset, __global long *p);
void __ovld vstore3(float3 data, size_t offset, __global float *p);
void __ovld vstore4(char4 data, size_t offset, __global char *p);
void __ovld vstore4(short4 data, size_t offset, __global short *p);
void __ovld vstore4(int4 data, size_t offset, __global int *p);
void __ovld vstore4(long4 data, size_t offset, __global long *p);
void __ovld vstore4(float4 data, size_t offset, __global float *p);
void __ovld vstore8(char8 data, size_t offset, __global char *p);
void __ovld vstore8(short8 data, size_t offset, __global short *p);
void __ovld vstore8(int8 data, size_t offset, __global int *p);
void __ovld vstore8(long8 data, size_t offset, __global long *p);
void __ovld vstore8(float8 data, size_t offset, __global float *p);
void __ovld vstore16(char16 data, size_t offset, __global char *p);
void __ovld vstore16(short16 data, size_t offset, __global short *p);
void __ovld vstore16(int16 data, size_t offset, __global int *p);
void __ovld vstore16(long16 data, size_t offset, __global long *p);
void __ovld vstore16(float16 data, size_t offset, __global float *p);
void __ovld vstore2(char2 data, size_t offset, __local char *p);
void __ovld vstore2(short2 data, size_t offset, __local short *p);
void __ovld vstore2(int2 data, size_t offset, __local int *p);
void __ovld vstore2(long2 data, size_t offset, __local long *p);
void __ovld vstore2(float2 data, size_t offset, __local float *p);
void __ovld vstore3(char3 data, size_t offset, __local char *p);
void __ovld vstore3(short3 data, size_t offset, __local short *p);
void __ovld vstore3(int3 data, size_t offset, __local int *p);
void __ovld vstore3(long3 data, size_t offset, __local long *p);
void __ovld vstore3(float3 data, size_t offset, __local float *p);
void __ovld vstore4(char4 data, size_t offset, __local char *p);
void __ovld vstore4(short4 data, size_t offset, __local short *p);
void __ovld vstore4(int4 data, size_t offset, __local int *p);
void __ovld vstore4(long4 data, size_t offset, __local long *p);
void __ovld vstore4(float4 data, size_t offset, __local float *p);
void __ovld vstore8(char8 data, size_t offset, __local char *p);
void __ovld vstore8(short8 data, size_t offset, __local short *p);
void __ovld vstore8(int8 data, size_t offset, __local int *p);
void __ovld vstore8(long8 data, size_t offset, __local long *p);
void __ovld vstore8(float8 data, size_t offset, __local float *p);
void __ovld vstore16(char16 data, size_t offset, __local char *p);
void __ovld vstore16(short16 data, size_t offset, __local short *p);
void __ovld vstore16(int16 data, size_t offset, __local int *p);
void __ovld vstore16(long16 data, size_t offset, __local long *p);
void __ovld vstore16(float16 data, size_t offset, __local float *p);
void __ovld vstore2(char2 data, size_t offset, __private char *p);
void __ovld vstore2(short2 data, size_t offset, __private short *p);
void __ovld vstore2(int2 data, size_t offset, __private int *p);
void __ovld vstore2(long2 data, size_t offset, __private long *p);
void __ovld vstore2(float2 data, size_t offset, __private float *p);
void __ovld vstore3(char3 data, size_t offset, __private char *p);
void __ovld vstore3(short3 data, size_t offset, __private short *p);
void __ovld vstore3(int3 data, size_t offset, __private int *p);
void __ovld vstore3(long3 data, size_t offset, __private long *p);
void __ovld vstore3(float3 data, size_t offset, __private float *p);
void __ovld vstore4(char4 data, size_t offset, __private char *p);
void __ovld vstore4(short4 data, size_t offset, __private short *p);
void __ovld vstore4(int4 data, size_t offset, __private int *p);
void __ovld vstore4(long4 data, size_t offset, __private long *p);
void __ovld vstore4(float4 data, size_t offset, __private float *p);
void __ovld vstore8(char8 data, size_t offset, __private char *p);
void __ovld vstore8(short8 data, size_t offset, __private short *p);
void __ovld vstore8(int8 data, size_t offset, __private int *p);
void __ovld vstore8(long8 data, size_t offset, __private long *p);
void __ovld vstore8(float8 data, size_t offset, __private float *p);
void __ovld vstore16(char16 data, size_t offset, __private char *p);
void __ovld vstore16(short16 data, size_t offset, __private short *p);
void __ovld vstore16(int16 data, size_t offset, __private int *p);
void __ovld vstore16(long16 data, size_t offset, __private long *p);
void __ovld vstore16(float16 data, size_t offset, __private float *p);
#ifdef cl_khr_fp64
void __ovld vstore2(double2 data, size_t offset, __global double *p);
void __ovld vstore3(double3 data, size_t offset, __global double *p);
void __ovld vstore4(double4 data, size_t offset, __global double *p);
void __ovld vstore8(double8 data, size_t offset, __global double *p);
void __ovld vstore16(double16 data, size_t offset, __global double *p);
void __ovld vstore2(double2 data, size_t offset, __local double *p);
void __ovld vstore3(double3 data, size_t offset, __local double *p);
void __ovld vstore4(double4 data, size_t offset, __local double *p);
void __ovld vstore8(double8 data, size_t offset, __local double *p);
void __ovld vstore16(double16 data, size_t offset, __local double *p);
void __ovld vstore2(double2 data, size_t offset, __private double *p);
void __ovld vstore3(double3 data, size_t offset, __private double *p);
void __ovld vstore4(double4 data, size_t offset, __private double *p);
void __ovld vstore8(double8 data, size_t offset, __private double *p);
void __ovld vstore16(double16 data, size_t offset, __private double *p);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
void __ovld vstore(half data, size_t offset, __global half *p);
void __ovld vstore2(half2 data, size_t offset, __global half *p);
void __ovld vstore3(half3 data, size_t offset, __global half *p);
void __ovld vstore4(half4 data, size_t offset, __global half *p);
void __ovld vstore8(half8 data, size_t offset, __global half *p);
void __ovld vstore16(half16 data, size_t offset, __global half *p);
void __ovld vstore(half data, size_t offset, __local half *p);
void __ovld vstore2(half2 data, size_t offset, __local half *p);
void __ovld vstore3(half3 data, size_t offset, __local half *p);
void __ovld vstore4(half4 data, size_t offset, __local half *p);
void __ovld vstore8(half8 data, size_t offset, __local half *p);
void __ovld vstore16(half16 data, size_t offset, __local half *p);
void __ovld vstore(half data, size_t offset, __private half *p);
void __ovld vstore2(half2 data, size_t offset, __private half *p);
void __ovld vstore3(half3 data, size_t offset, __private half *p);
void __ovld vstore4(half4 data, size_t offset, __private half *p);
void __ovld vstore8(half8 data, size_t offset, __private half *p);
void __ovld vstore16(half16 data, size_t offset, __private half *p);
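// Illustrative usage sketch added by the editor (not part of the original header):
// vstoren(data, offset, p) writes the n components of data to p + offset * n,
// mirroring vloadn. Names below are hypothetical.
//
//   __kernel void fill4(__global float *out, float4 value) {
//     size_t i = get_global_id(0);
//     vstore4(value, i, out);         // writes out[4*i .. 4*i+3]
//   }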
#endif //cl_khr_fp16
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

/* ... vload_half / vstore_half / vloada_half / vstorea_half declarations elided
   in this excerpt ... */

#define CLK_LOCAL_MEM_FENCE 0x01
#define CLK_GLOBAL_MEM_FENCE 0x02

#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
#define CLK_IMAGE_MEM_FENCE 0x04
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
/* fragment of the memory_scope enumeration; surrounding enumerators elided */
#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
  memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
#endif
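// Illustrative usage sketch added by the editor (not part of the original header):
// the CLK_*_MEM_FENCE flags select which address space a work-group barrier orders.
// Names below are hypothetical.
//
//   __kernel void reverse_tile(__global int *data, __local int *tile) {
//     size_t l = get_local_id(0), n = get_local_size(0);
//     tile[l] = data[get_global_id(0)];
//     barrier(CLK_LOCAL_MEM_FENCE);   // make tile[] visible to the whole work-group
//     data[get_group_id(0) * n + l] = tile[n - 1 - l];
//   }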
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0

/* ... synchronization, memory fence, and async copy built-in declarations elided
   in this excerpt ... */
void __ovld prefetch(const __global char *p, size_t num_elements);
void __ovld prefetch(const __global short *p, size_t num_elements);
void __ovld prefetch(const __global int *p, size_t num_elements);
void __ovld prefetch(const __global long *p, size_t num_elements);
void __ovld prefetch(const __global float *p, size_t num_elements);
void __ovld prefetch(const __global char2 *p, size_t num_elements);
void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
void __ovld prefetch(const __global short2 *p, size_t num_elements);
void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
void __ovld prefetch(const __global int2 *p, size_t num_elements);
void __ovld prefetch(const __global uint2 *p, size_t num_elements);
void __ovld prefetch(const __global long2 *p, size_t num_elements);
void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
void __ovld prefetch(const __global float2 *p, size_t num_elements);
void __ovld prefetch(const __global char3 *p, size_t num_elements);
void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
void __ovld prefetch(const __global short3 *p, size_t num_elements);
void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
void __ovld prefetch(const __global int3 *p, size_t num_elements);
void __ovld prefetch(const __global uint3 *p, size_t num_elements);
void __ovld prefetch(const __global long3 *p, size_t num_elements);
void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
void __ovld prefetch(const __global float3 *p, size_t num_elements);
void __ovld prefetch(const __global char4 *p, size_t num_elements);
void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
void __ovld prefetch(const __global short4 *p, size_t num_elements);
void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
void __ovld prefetch(const __global int4 *p, size_t num_elements);
void __ovld prefetch(const __global uint4 *p, size_t num_elements);
void __ovld prefetch(const __global long4 *p, size_t num_elements);
void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
void __ovld prefetch(const __global float4 *p, size_t num_elements);
void __ovld prefetch(const __global char8 *p, size_t num_elements);
void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
void __ovld prefetch(const __global short8 *p, size_t num_elements);
void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
void __ovld prefetch(const __global int8 *p, size_t num_elements);
void __ovld prefetch(const __global uint8 *p, size_t num_elements);
void __ovld prefetch(const __global long8 *p, size_t num_elements);
void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
void __ovld prefetch(const __global float8 *p, size_t num_elements);
void __ovld prefetch(const __global char16 *p, size_t num_elements);
void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
void __ovld prefetch(const __global short16 *p, size_t num_elements);
void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
void __ovld prefetch(const __global int16 *p, size_t num_elements);
void __ovld prefetch(const __global uint16 *p, size_t num_elements);
void __ovld prefetch(const __global long16 *p, size_t num_elements);
void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
void __ovld prefetch(const __global float16 *p, size_t num_elements);
#ifdef cl_khr_fp64
void __ovld prefetch(const __global double *p, size_t num_elements);
void __ovld prefetch(const __global double2 *p, size_t num_elements);
void __ovld prefetch(const __global double3 *p, size_t num_elements);
void __ovld prefetch(const __global double4 *p, size_t num_elements);
void __ovld prefetch(const __global double8 *p, size_t num_elements);
void __ovld prefetch(const __global double16 *p, size_t num_elements);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
void __ovld prefetch(const __global half *p, size_t num_elements);
void __ovld prefetch(const __global half2 *p, size_t num_elements);
void __ovld prefetch(const __global half3 *p, size_t num_elements);
void __ovld prefetch(const __global half4 *p, size_t num_elements);
void __ovld prefetch(const __global half8 *p, size_t num_elements);
void __ovld prefetch(const __global half16 *p, size_t num_elements);
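// Illustrative usage sketch added by the editor (not part of the original header):
// prefetch(p, n) hints that n elements at the __global pointer p should be brought
// closer to the compute unit; it has no observable semantic effect. Names below are
// hypothetical.
//
//   __kernel void accumulate(__global const float *in, __global float *out, int chunk) {
//     size_t base = get_global_id(0) * chunk;
//     prefetch(&in[base], chunk);     // hint: this work-item will stream chunk elements
//     float acc = 0.0f;
//     for (int k = 0; k < chunk; ++k)
//       acc += in[base + k];
//     out[get_global_id(0)] = acc;
//   }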
#endif // cl_khr_fp16

#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
#endif
volatile __global
unsigned int *p,
unsigned int val);
13343 unsigned int __ovld atomic_add(
volatile __local
unsigned int *p,
unsigned int val);
13345 #if defined(cl_khr_global_int32_base_atomics) 13346 int __ovld atom_add(
volatile __global
int *p,
int val);
13347 unsigned int __ovld atom_add(
volatile __global
unsigned int *p,
unsigned int val);
13349 #if defined(cl_khr_local_int32_base_atomics) 13350 int __ovld atom_add(
volatile __local
int *p,
int val);
13351 unsigned int __ovld atom_add(
volatile __local
unsigned int *p,
unsigned int val);
13354 #if defined(cl_khr_int64_base_atomics) 13355 long __ovld atom_add(
volatile __global
long *p,
long val);
13356 unsigned long __ovld atom_add(
volatile __global
unsigned long *p,
unsigned long val);
13357 long __ovld atom_add(
volatile __local
long *p,
long val);
13358 unsigned long __ovld atom_add(
volatile __local
unsigned long *p,
unsigned long val);
13367 unsigned int __ovld atomic_sub(
volatile __global
unsigned int *p,
unsigned int val);
13369 unsigned int __ovld atomic_sub(
volatile __local
unsigned int *p,
unsigned int val);
13371 #if defined(cl_khr_global_int32_base_atomics) 13372 int __ovld atom_sub(
volatile __global
int *p,
int val);
13373 unsigned int __ovld atom_sub(
volatile __global
unsigned int *p,
unsigned int val);
13375 #if defined(cl_khr_local_int32_base_atomics) 13376 int __ovld atom_sub(
volatile __local
int *p,
int val);
13377 unsigned int __ovld atom_sub(
volatile __local
unsigned int *p,
unsigned int val);
13380 #if defined(cl_khr_int64_base_atomics) 13381 long __ovld atom_sub(
volatile __global
long *p,
long val);
13382 unsigned long __ovld atom_sub(
volatile __global
unsigned long *p,
unsigned long val);
13383 long __ovld atom_sub(
volatile __local
long *p,
long val);
13384 unsigned long __ovld atom_sub(
volatile __local
unsigned long *p,
unsigned long val);
13393 unsigned int __ovld atomic_xchg(
volatile __global
unsigned int *p,
unsigned int val);
13395 unsigned int __ovld atomic_xchg(
volatile __local
unsigned int *p,
unsigned int val);
13399 #if defined(cl_khr_global_int32_base_atomics) 13400 int __ovld atom_xchg(
volatile __global
int *p,
int val);
13401 unsigned int __ovld atom_xchg(
volatile __global
unsigned int *p,
unsigned int val);
13403 #if defined(cl_khr_local_int32_base_atomics) 13404 int __ovld atom_xchg(
volatile __local
int *p,
int val);
13405 unsigned int __ovld atom_xchg(
volatile __local
unsigned int *p,
unsigned int val);
13408 #if defined(cl_khr_int64_base_atomics) 13409 long __ovld atom_xchg(
volatile __global
long *p,
long val);
13410 long __ovld atom_xchg(
volatile __local
long *p,
long val);
13411 unsigned long __ovld atom_xchg(
volatile __global
unsigned long *p,
unsigned long val);
13412 unsigned long __ovld atom_xchg(
volatile __local
unsigned long *p,
unsigned long val);
13426 #if defined(cl_khr_global_int32_base_atomics)
13427 int __ovld atom_inc(volatile __global int *p);
13428 unsigned int __ovld atom_inc(volatile __global unsigned int *p);
13430 #if defined(cl_khr_local_int32_base_atomics)
13431 int __ovld atom_inc(volatile __local int *p);
13432 unsigned int __ovld atom_inc(volatile __local unsigned int *p);
13435 #if defined(cl_khr_int64_base_atomics)
13436 long __ovld atom_inc(volatile __global long *p);
13437 unsigned long __ovld atom_inc(volatile __global unsigned long *p);
13438 long __ovld atom_inc(volatile __local long *p);
13439 unsigned long __ovld atom_inc(volatile __local unsigned long *p);
13453 #if defined(cl_khr_global_int32_base_atomics)
13454 int __ovld atom_dec(volatile __global int *p);
13455 unsigned int __ovld atom_dec(volatile __global unsigned int *p);
13457 #if defined(cl_khr_local_int32_base_atomics)
13458 int __ovld atom_dec(volatile __local int *p);
13459 unsigned int __ovld atom_dec(volatile __local unsigned int *p);
13462 #if defined(cl_khr_int64_base_atomics)
13463 long __ovld atom_dec(volatile __global long *p);
13464 unsigned long __ovld atom_dec(volatile __global unsigned long *p);
13465 long __ovld atom_dec(volatile __local long *p);
13466 unsigned long __ovld atom_dec(volatile __local unsigned long *p);
13477 unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
13479 unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
13481 #if defined(cl_khr_global_int32_base_atomics)
13482 int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
13483 unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
13485 #if defined(cl_khr_local_int32_base_atomics)
13486 int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
13487 unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
13490 #if defined(cl_khr_int64_base_atomics)
13491 long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
13492 unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
13493 long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
13494 unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
13505 unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
13507 unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
13509 #if defined(cl_khr_global_int32_extended_atomics)
13510 int __ovld atom_min(volatile __global int *p, int val);
13511 unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
13513 #if defined(cl_khr_local_int32_extended_atomics)
13514 int __ovld atom_min(volatile __local int *p, int val);
13515 unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
13518 #if defined(cl_khr_int64_extended_atomics)
13519 long __ovld atom_min(volatile __global long *p, long val);
13520 unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
13521 long __ovld atom_min(volatile __local long *p, long val);
13522 unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
13533 unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
13535 unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
13537 #if defined(cl_khr_global_int32_extended_atomics)
13538 int __ovld atom_max(volatile __global int *p, int val);
13539 unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
13541 #if defined(cl_khr_local_int32_extended_atomics)
13542 int __ovld atom_max(volatile __local int *p, int val);
13543 unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
13546 #if defined(cl_khr_int64_extended_atomics)
13547 long __ovld atom_max(volatile __global long *p, long val);
13548 unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
13549 long __ovld atom_max(volatile __local long *p, long val);
13550 unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
13560 unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
13562 unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
13564 #if defined(cl_khr_global_int32_extended_atomics)
13565 int __ovld atom_and(volatile __global int *p, int val);
13566 unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
13568 #if defined(cl_khr_local_int32_extended_atomics)
13569 int __ovld atom_and(volatile __local int *p, int val);
13570 unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
13573 #if defined(cl_khr_int64_extended_atomics)
13574 long __ovld atom_and(volatile __global long *p, long val);
13575 unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
13576 long __ovld atom_and(volatile __local long *p, long val);
13577 unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
13587 unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
13589 unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
13591 #if defined(cl_khr_global_int32_extended_atomics)
13592 int __ovld atom_or(volatile __global int *p, int val);
13593 unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
13595 #if defined(cl_khr_local_int32_extended_atomics)
13596 int __ovld atom_or(volatile __local int *p, int val);
13597 unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
13600 #if defined(cl_khr_int64_extended_atomics)
13601 long __ovld atom_or(volatile __global long *p, long val);
13602 unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
13603 long __ovld atom_or(volatile __local long *p, long val);
13604 unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
13614 unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
13616 unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
13618 #if defined(cl_khr_global_int32_extended_atomics)
13619 int __ovld atom_xor(volatile __global int *p, int val);
13620 unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
13622 #if defined(cl_khr_local_int32_extended_atomics)
13623 int __ovld atom_xor(volatile __local int *p, int val);
13624 unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
13627 #if defined(cl_khr_int64_extended_atomics)
13628 long __ovld atom_xor(volatile __global long *p, long val);
13629 unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
13630 long __ovld atom_xor(volatile __local long *p, long val);
13631 unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
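A minimal usage sketch of the 32-bit atomics listed above: a 256-bin histogram that accumulates in __local memory and merges into a __global result. The kernel name, buffer layout and bin count are hypothetical and not part of this header; atomic_inc/atomic_add are the core OpenCL 1.1+ spellings, while the atom_* overloads above additionally require the corresponding cl_khr_*_atomics extensions.

__kernel void hist256(__global const uchar *in, uint n, __global uint *bins) {
    __local uint local_bins[256];
    for (uint i = get_local_id(0); i < 256u; i += get_local_size(0))
        local_bins[i] = 0u;                      /* clear the per-group bins */
    barrier(CLK_LOCAL_MEM_FENCE);
    for (uint i = get_global_id(0); i < n; i += get_global_size(0))
        atomic_inc(&local_bins[in[i]]);          /* contended __local update */
    barrier(CLK_LOCAL_MEM_FENCE);
    for (uint i = get_local_id(0); i < 256u; i += get_local_size(0))
        atomic_add(&bins[i], local_bins[i]);     /* merge into __global bins */
}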
13634 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13635 #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
13636 #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
13641 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
13642 #ifndef ATOMIC_VAR_INIT
13643 #define ATOMIC_VAR_INIT(x) (x)
13644 #endif //ATOMIC_VAR_INIT
13645 #define ATOMIC_FLAG_INIT 0
13658 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13659 #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
13660 #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
13667 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13672 #endif //cl_khr_fp64
13729 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13778 #endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13784 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13836 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13841 #endif //cl_khr_fp64
13861 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13866 #endif //cl_khr_fp64
13886 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
13891 #endif //cl_khr_fp64
13904 int desired, memory_order success, memory_order failure);
13906 int desired, memory_order success, memory_order failure, memory_scope scope);
13909 uint desired, memory_order success, memory_order failure);
13911 uint desired, memory_order success, memory_order failure, memory_scope scope);
13914 int desired, memory_order success, memory_order failure);
13916 int desired, memory_order success, memory_order failure, memory_scope scope);
13919 uint desired, memory_order success, memory_order failure);
13921 uint desired, memory_order success, memory_order failure, memory_scope scope);
13924 float desired, memory_order success, memory_order failure);
13926 float desired, memory_order success, memory_order failure, memory_scope scope);
13929 float desired, memory_order success, memory_order failure);
13931 float desired, memory_order success, memory_order failure, memory_scope scope);
13932 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13936 double desired, memory_order success, memory_order failure);
13938 double desired, memory_order success, memory_order failure, memory_scope scope);
13941 double desired, memory_order success, memory_order failure);
13943 double desired, memory_order success, memory_order failure, memory_scope scope);
13944 #endif //cl_khr_fp64 13947 long desired, memory_order success, memory_order failure);
13949 long desired, memory_order success, memory_order failure, memory_scope scope);
13952 long desired, memory_order success, memory_order failure);
13954 long desired, memory_order success, memory_order failure, memory_scope scope);
13957 ulong desired, memory_order success, memory_order failure);
13959 ulong desired, memory_order success, memory_order failure, memory_scope scope);
13962 ulong desired, memory_order success, memory_order failure);
13964 ulong desired, memory_order success, memory_order failure, memory_scope scope);
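The lines above are the trailing parameter lists of the OpenCL 2.0 atomic compare-exchange overloads; their leading halves are not included in this excerpt, and are assumed here to be the atomic_compare_exchange_{strong,weak}[_explicit] families. A minimal sketch, with a hypothetical kernel name, uses a CAS loop to record a global float maximum (float has no atomic fetch-max):

__kernel void float_max(__global const float *in, uint n,
                        volatile __global atomic_float *result) {
    float v = -INFINITY;
    for (uint i = get_global_id(0); i < n; i += get_global_size(0))
        v = fmax(v, in[i]);                        /* per-work-item partial max */
    float seen = atomic_load_explicit(result, memory_order_relaxed);
    /* retry until the stored value is at least as large as our candidate */
    while (v > seen &&
           !atomic_compare_exchange_strong_explicit(result, &seen, v,
                                                    memory_order_relaxed,
                                                    memory_order_relaxed))
        ;
}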
13976 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
14222 #endif //cl_khr_fp64
14244 #endif //cl_khr_fp16
14446 #endif //cl_khr_fp64
14468 #endif //cl_khr_fp16
14470 #if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14473 int printf(__constant const char* st, ...);
14482 #define CLK_ADDRESS_NONE 0
14483 #define CLK_ADDRESS_CLAMP_TO_EDGE 2
14484 #define CLK_ADDRESS_CLAMP 4
14485 #define CLK_ADDRESS_REPEAT 6
14486 #define CLK_ADDRESS_MIRRORED_REPEAT 8
14491 #define CLK_NORMALIZED_COORDS_FALSE 0
14492 #define CLK_NORMALIZED_COORDS_TRUE 1
14497 #define CLK_FILTER_NEAREST 0x10
14498 #define CLK_FILTER_LINEAR 0x20
14500 #ifdef cl_khr_gl_msaa_sharing
14501 #pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
14502 #endif //cl_khr_gl_msaa_sharing
14613 #if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14621 #endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14631 #if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14639 #endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14641 #ifdef cl_khr_depth_images
14647 #endif //cl_khr_depth_images
14649 #if defined(cl_khr_gl_msaa_sharing)
14661 #endif //cl_khr_gl_msaa_sharing
14664 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
14665 #ifdef cl_khr_mipmap_image
14691 float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14692 int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14693 uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14695 float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
14696 int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
14697 uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
14699 float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14700 int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14701 uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14703 float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14705 float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14706 int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14707 uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14709 float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14711 float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14712 int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14713 uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
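A sketch of a sampled read using the gradient overloads from the cl_khr_mipmap_image block above (OpenCL 2.0): the explicit gradients drive mip-level selection, much like textureGrad in other APIs. The kernel name, sampler settings and output layout are hypothetical.

__constant sampler_t mip_smp =
    CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;

__kernel void sample_with_gradients(read_only image2d_t src,
                                    __global float4 *out, float2 step) {
    int2 gid = (int2)(get_global_id(0), get_global_id(1));
    float2 uv = ((float2)(gid.x, gid.y) + 0.5f) * step;   /* normalized coords */
    float2 dx = (float2)(step.x, 0.0f);                   /* d(uv)/d(gid.x)    */
    float2 dy = (float2)(0.0f, step.y);                   /* d(uv)/d(gid.y)    */
    out[gid.y * get_global_size(0) + gid.x] = read_imagef(src, mip_smp, uv, dx, dy);
}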
14739 #endif //cl_khr_mipmap_image
14740 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
14742 #if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14768 #ifdef cl_khr_depth_images
14771 #endif //cl_khr_depth_images
14777 #endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14781 half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
14782 half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
14783 half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
14784 half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
14785 half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
14786 half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
14787 #if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14788 half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
14789 half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
14790 half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
14791 half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
14795 half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
14796 half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
14797 half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
14798 half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
14799 half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
14800 half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
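A minimal cl_khr_fp16 sketch using the sampler-less read_imageh overloads above, together with write_imageh (declared further below), to copy half-precision pixels. The kernel name is hypothetical; sampler-less reads require OpenCL 1.2 or later.

#pragma OPENCL EXTENSION cl_khr_fp16 : enable
__kernel void copy_h(read_only image2d_t src, write_only image2d_t dst) {
    int2 p = (int2)(get_global_id(0), get_global_id(1));
    half4 px = read_imageh(src, p);   /* integer coords, no sampler */
    write_imageh(dst, p, px);
}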
14801 #endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
14802 #endif //cl_khr_fp16
14805 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
14830 #ifdef cl_khr_depth_images
14833 #endif //cl_khr_depth_images
14835 #if cl_khr_gl_msaa_sharing
14846 #endif //cl_khr_gl_msaa_sharing
14848 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
14849 #ifdef cl_khr_mipmap_image
14854 float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
14864 float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
14874 float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14875 int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14876 uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
14878 float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
14879 int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
14880 uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
14882 float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14883 int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14884 uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14886 float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14888 float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14889 int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14890 uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14892 float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14894 float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14895 int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14896 uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14902 float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
14912 float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
14921 #endif //cl_khr_mipmap_image
14922 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
14926 half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
14927 half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
14928 half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
14929 half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
14930 half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
14931 half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
14932 #endif //cl_khr_fp16
14933 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15006 void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
15007 void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
15008 void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
15014 void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
15018 void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
15019 void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
15020 void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
15022 #ifdef cl_khr_3d_image_writes
15028 #ifdef cl_khr_depth_images
15029 void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
15030 void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
15031 #endif //cl_khr_depth_images
15034 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15035 #ifdef cl_khr_mipmap_image
15036 void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
15037 void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
15040 void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
15041 void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
15042 void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
15044 void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
15045 void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
15048 void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
15049 void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
15050 void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
15052 void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float color);
15053 void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float color);
15055 #ifdef cl_khr_3d_image_writes
15056 void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
15057 void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
15060 #endif //cl_khr_mipmap_image
15061 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15065 void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
15066 void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
15067 #ifdef cl_khr_3d_image_writes
15068 void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
15070 void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
15071 void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
15072 void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
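A sketch of writing a single mip level through the cl_khr_mipmap_image write overloads above (OpenCL 2.0). The kernel name and the idea of clearing one level to a constant are hypothetical; coordinates are expressed in texels of the selected level.

__kernel void clear_mip_level(write_only image2d_t dst, int lod, float4 value) {
    int2 p = (int2)(get_global_id(0), get_global_id(1));
    write_imagef(dst, p, lod, value);
}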
15073 #endif //cl_khr_fp16
15076 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15081 void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
15082 void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
15083 void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
15089 void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
15093 void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
15094 void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
15095 void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
15097 #ifdef cl_khr_3d_image_writes
15103 #ifdef cl_khr_depth_images
15104 void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
15105 void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
15106 #endif //cl_khr_depth_images
15108 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15109 #ifdef cl_khr_mipmap_image
15110 void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
15111 void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
15114 void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
15115 void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
15116 void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
15118 void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
15119 void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
15122 void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
15123 void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
15124 void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
15126 void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
15127 void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
15129 #ifdef cl_khr_3d_image_writes
15130 void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
15131 void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
15134 #endif //cl_khr_mipmap_image
15135 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15139 void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
15140 void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
15141 #ifdef cl_khr_3d_image_writes
15142 void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
15144 void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
15145 void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
15146 void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
15147 #endif //cl_khr_fp16
15148 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15161 #ifdef cl_khr_3d_image_writes
15166 #ifdef cl_khr_depth_images
15169 #endif //cl_khr_depth_images
15170 #if defined(cl_khr_gl_msaa_sharing)
15175 #endif //cl_khr_gl_msaa_sharing
15180 #ifdef cl_khr_3d_image_writes
15185 #ifdef cl_khr_depth_images
15188 #endif //cl_khr_depth_images
15189 #if defined(cl_khr_gl_msaa_sharing)
15194 #endif //cl_khr_gl_msaa_sharing
15196 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15203 #ifdef cl_khr_depth_images
15206 #endif //cl_khr_depth_images
15207 #if defined(cl_khr_gl_msaa_sharing)
15212 #endif //cl_khr_gl_msaa_sharing
15213 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15221 #ifdef cl_khr_depth_images
15224 #endif //cl_khr_depth_images
15225 #if defined(cl_khr_gl_msaa_sharing)
15230 #endif //cl_khr_gl_msaa_sharing
15233 #ifdef cl_khr_3d_image_writes
15237 #ifdef cl_khr_depth_images
15240 #endif //cl_khr_depth_images
15241 #if defined(cl_khr_gl_msaa_sharing)
15246 #endif //cl_khr_gl_msaa_sharing
15248 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15252 #ifdef cl_khr_depth_images
15255 #endif //cl_khr_depth_images
15256 #if defined(cl_khr_gl_msaa_sharing)
15261 #endif //cl_khr_gl_msaa_sharing
15262 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15269 #ifdef cl_khr_3d_image_writes
15273 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15275 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15278 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15279 #ifdef cl_khr_mipmap_image
15284 int __ovld get_image_num_mip_levels(read_only image1d_t image);
15285 int __ovld get_image_num_mip_levels(read_only image2d_t image);
15286 int __ovld get_image_num_mip_levels(read_only image3d_t image);
15288 int __ovld get_image_num_mip_levels(write_only image1d_t image);
15289 int __ovld get_image_num_mip_levels(write_only image2d_t image);
15290 #ifdef cl_khr_3d_image_writes
15291 int __ovld get_image_num_mip_levels(write_only image3d_t image);
15294 int __ovld get_image_num_mip_levels(read_write image1d_t image);
15295 int __ovld get_image_num_mip_levels(read_write image2d_t image);
15296 int __ovld get_image_num_mip_levels(read_write image3d_t image);
15298 int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
15299 int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
15300 int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
15301 int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
15303 int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
15304 int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
15305 int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
15306 int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
15308 int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
15309 int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
15310 int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
15311 int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
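A minimal sketch of the mip-level query declared above (OpenCL 2.0 with cl_khr_mipmap_image); the kernel name and output convention are hypothetical.

__kernel void query_mip_levels(read_only image2d_t img, __global int *out) {
    if (get_global_id(0) == 0)
        out[0] = get_image_num_mip_levels(img);
}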
15313 #endif //cl_khr_mipmap_image
15314 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15338 #define CLK_SNORM_INT8 0x10D0
15339 #define CLK_SNORM_INT16 0x10D1
15340 #define CLK_UNORM_INT8 0x10D2
15341 #define CLK_UNORM_INT16 0x10D3
15342 #define CLK_UNORM_SHORT_565 0x10D4
15343 #define CLK_UNORM_SHORT_555 0x10D5
15344 #define CLK_UNORM_INT_101010 0x10D6
15345 #define CLK_SIGNED_INT8 0x10D7
15346 #define CLK_SIGNED_INT16 0x10D8
15347 #define CLK_SIGNED_INT32 0x10D9
15348 #define CLK_UNSIGNED_INT8 0x10DA
15349 #define CLK_UNSIGNED_INT16 0x10DB
15350 #define CLK_UNSIGNED_INT32 0x10DC
15351 #define CLK_HALF_FLOAT 0x10DD
15352 #define CLK_FLOAT 0x10DE
15353 #define CLK_UNORM_INT24 0x10DF
15361 #ifdef cl_khr_depth_images
15364 #endif //cl_khr_depth_images
15365 #if defined(cl_khr_gl_msaa_sharing)
15370 #endif //cl_khr_gl_msaa_sharing
15375 #ifdef cl_khr_3d_image_writes
15380 #ifdef cl_khr_depth_images
15383 #endif //cl_khr_depth_images
15384 #if defined(cl_khr_gl_msaa_sharing)
15389 #endif //cl_khr_gl_msaa_sharing
15391 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15398 #ifdef cl_khr_depth_images
15401 #endif //cl_khr_depth_images
15402 #if defined(cl_khr_gl_msaa_sharing)
15407 #endif //cl_khr_gl_msaa_sharing
15408 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15428 #define CLK_R 0x10B0
15429 #define CLK_A 0x10B1
15430 #define CLK_RG 0x10B2
15431 #define CLK_RA 0x10B3
15432 #define CLK_RGB 0x10B4
15433 #define CLK_RGBA 0x10B5
15434 #define CLK_BGRA 0x10B6
15435 #define CLK_ARGB 0x10B7
15436 #define CLK_INTENSITY 0x10B8
15437 #define CLK_LUMINANCE 0x10B9
15438 #define CLK_Rx 0x10BA
15439 #define CLK_RGx 0x10BB
15440 #define CLK_RGBx 0x10BC
15441 #define CLK_DEPTH 0x10BD
15442 #define CLK_DEPTH_STENCIL 0x10BE
15443 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15444 #define CLK_sRGB 0x10BF
15445 #define CLK_sRGBx 0x10C0
15446 #define CLK_sRGBA 0x10C1
15447 #define CLK_sBGRA 0x10C2
15448 #define CLK_ABGR 0x10C3
15449 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15457 #ifdef cl_khr_depth_images
15460 #endif //cl_khr_depth_images
15461 #if defined(cl_khr_gl_msaa_sharing)
15466 #endif //cl_khr_gl_msaa_sharing
15471 #ifdef cl_khr_3d_image_writes
15476 #ifdef cl_khr_depth_images
15479 #endif //cl_khr_depth_images
15480 #if defined(cl_khr_gl_msaa_sharing)
15485 #endif //cl_khr_gl_msaa_sharing
15487 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15494 #ifdef cl_khr_depth_images
15497 #endif //cl_khr_depth_images
15498 #if defined(cl_khr_gl_msaa_sharing)
15503 #endif //cl_khr_gl_msaa_sharing
15504 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15513 #ifdef cl_khr_depth_images
15516 #endif //cl_khr_depth_images
15517 #if defined(cl_khr_gl_msaa_sharing)
15522 #endif //cl_khr_gl_msaa_sharing
15526 #ifdef cl_khr_depth_images
15529 #endif //cl_khr_depth_images
15530 #if defined(cl_khr_gl_msaa_sharing)
15535 #endif //cl_khr_gl_msaa_sharing
15537 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15540 #ifdef cl_khr_depth_images
15543 #endif //cl_khr_depth_images
15544 #if defined(cl_khr_gl_msaa_sharing)
15549 #endif //cl_khr_gl_msaa_sharing
15550 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15559 #ifdef cl_khr_3d_image_writes
15562 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15564 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15572 #ifdef cl_khr_depth_images
15574 #endif //cl_khr_depth_images
15575 #if defined(cl_khr_gl_msaa_sharing)
15578 #endif //cl_khr_gl_msaa_sharing
15582 #ifdef cl_khr_depth_images
15584 #endif //cl_khr_depth_images
15585 #if defined(cl_khr_gl_msaa_sharing)
15588 #endif //cl_khr_gl_msaa_sharing
15590 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15593 #ifdef cl_khr_depth_images
15595 #endif //cl_khr_depth_images
15596 #if defined(cl_khr_gl_msaa_sharing)
15599 #endif //cl_khr_gl_msaa_sharing
15600 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15605 #if defined(cl_khr_gl_msaa_sharing)
15606 int __ovld get_image_num_samples(read_only image2d_msaa_t image);
15607 int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
15608 int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
15609 int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
15610 int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
15612 int __ovld get_image_num_samples(write_only image2d_msaa_t image);
15613 int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
15614 int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
15615 int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
15616 int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
15618 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15619 int __ovld get_image_num_samples(read_write image2d_msaa_t image);
15620 int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
15621 int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
15622 int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
15623 int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
15624 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15629 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15657 #endif //cl_khr_fp64
15725 #endif //cl_khr_fp64
15727 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15730 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15731 #define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
15733 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15737 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15739 #define CL_COMPLETE 0x0
15740 #define CL_RUNNING 0x1
15741 #define CL_SUBMITTED 0x2
15742 #define CL_QUEUED 0x3
15744 #define CLK_SUCCESS 0
15745 #define CLK_ENQUEUE_FAILURE -101
15746 #define CLK_INVALID_QUEUE -102
15747 #define CLK_INVALID_NDRANGE -160
15748 #define CLK_INVALID_EVENT_WAIT_LIST -57
15749 #define CLK_DEVICE_QUEUE_FULL -161
15750 #define CLK_INVALID_ARG_SIZE -51
15751 #define CLK_EVENT_ALLOCATION_FAILURE -100
15752 #define CLK_OUT_OF_RESOURCES -5
15754 #define CLK_NULL_QUEUE 0
15755 #define CLK_NULL_EVENT (__builtin_astype(((void*)(__SIZE_MAX__)), clk_event_t))
15758 #define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0
15759 #define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1
15760 #define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2
15766 #define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
15768 #define MAX_WORK_DIM 3
15804 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15808 #if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
15813 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15814 uint __ovld get_enqueued_num_sub_groups(void);
15815 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15819 void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
15820 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
15821 void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
15822 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
15831 float __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
15849 int __ovld __conv sub_group_scan_exclusive_add(int x);
15851 long __ovld __conv sub_group_scan_exclusive_add(long x);
15853 float __ovld __conv sub_group_scan_exclusive_add(float x);
15854 int __ovld __conv sub_group_scan_exclusive_min(int x);
15856 long __ovld __conv sub_group_scan_exclusive_min(long x);
15858 float __ovld __conv sub_group_scan_exclusive_min(float x);
15859 int __ovld __conv sub_group_scan_exclusive_max(int x);
15861 long __ovld __conv sub_group_scan_exclusive_max(long x);
15863 float __ovld __conv sub_group_scan_exclusive_max(float x);
15865 int __ovld __conv sub_group_scan_inclusive_add(int x);
15867 long __ovld __conv sub_group_scan_inclusive_add(long x);
15869 float __ovld __conv sub_group_scan_inclusive_add(float x);
15870 int __ovld __conv sub_group_scan_inclusive_min(int x);
15872 long __ovld __conv sub_group_scan_inclusive_min(long x);
15874 float __ovld __conv sub_group_scan_inclusive_min(float x);
15875 int __ovld __conv sub_group_scan_inclusive_max(int x);
15877 long __ovld __conv sub_group_scan_inclusive_max(long x);
15879 float __ovld __conv sub_group_scan_inclusive_max(float x);
15886 half __ovld __conv sub_group_scan_exclusive_add(half x);
15887 half __ovld __conv sub_group_scan_exclusive_min(half x);
15888 half __ovld __conv sub_group_scan_exclusive_max(half x);
15889 half __ovld __conv sub_group_scan_inclusive_add(half x);
15890 half __ovld __conv sub_group_scan_inclusive_min(half x);
15891 half __ovld __conv sub_group_scan_inclusive_max(half x);
15892 #endif //cl_khr_fp16
15895 double __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
15899 double __ovld __conv sub_group_scan_exclusive_add(double x);
15900 double __ovld __conv sub_group_scan_exclusive_min(double x);
15901 double __ovld __conv sub_group_scan_exclusive_max(double x);
15902 double __ovld __conv sub_group_scan_inclusive_add(double x);
15903 double __ovld __conv sub_group_scan_inclusive_min(double x);
15904 double __ovld __conv sub_group_scan_inclusive_max(double x);
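A sketch of the sub-group scan and broadcast built-ins above (cl_khr_subgroups or cl_intel_subgroups): an inclusive prefix sum within each sub-group, with the sub-group total read from the last lane and written out once per sub-group. The kernel name and output layout are hypothetical, and the int overload of sub_group_broadcast is assumed alongside the float/double ones shown.

__kernel void subgroup_prefix_sum(__global const int *in, __global int *prefix,
                                  __global int *totals) {
    size_t i = get_global_id(0);
    int scanned = sub_group_scan_inclusive_add(in[i]);
    prefix[i] = scanned;
    /* the highest lane of the sub-group holds the whole sub-group's sum */
    int total = sub_group_broadcast(scanned, get_sub_group_size() - 1u);
    if (get_sub_group_local_id() == 0) {
        size_t sg = get_group_id(0) * get_num_sub_groups() + get_sub_group_id();
        totals[sg] = total;
    }
}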
15905 #endif //cl_khr_fp64
15907 #endif //cl_khr_subgroups cl_intel_subgroups
15909 #if defined(cl_intel_subgroups)
15935 float __ovld __conv intel_sub_group_shuffle_down( float cur, float next, uint c );
15936 float2
__ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next,
uint c );
15937 float3
__ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next,
uint c );
15938 float4
__ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next,
uint c );
15939 float8
__ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next,
uint c );
15940 float16
__ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next,
uint c );
15942 int __ovld __conv intel_sub_group_shuffle_down(
int cur,
int next,
uint c );
15943 int2
__ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next,
uint c );
15944 int3
__ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next,
uint c );
15945 int4
__ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next,
uint c );
15946 int8
__ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next,
uint c );
15947 int16
__ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next,
uint c );
15950 uint2
__ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next,
uint c );
15951 uint3
__ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next,
uint c );
15952 uint4
__ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next,
uint c );
15953 uint8
__ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next,
uint c );
15954 uint16
__ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next,
uint c );
15956 long __ovld __conv intel_sub_group_shuffle_down(
long prev,
long cur,
uint c );
15959 float __ovld __conv intel_sub_group_shuffle_up(
float prev,
float cur,
uint c );
15960 float2
__ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur,
uint c );
15961 float3
__ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur,
uint c );
15962 float4
__ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur,
uint c );
15963 float8
__ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur,
uint c );
15964 float16
__ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur,
uint c );
15966 int __ovld __conv intel_sub_group_shuffle_up(
int prev,
int cur,
uint c );
15967 int2
__ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur,
uint c );
15968 int3
__ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur,
uint c );
15969 int4
__ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur,
uint c );
15970 int8
__ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur,
uint c );
15971 int16
__ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur,
uint c );
15974 uint2
__ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur,
uint c );
15975 uint3
__ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur,
uint c );
15976 uint4
__ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur,
uint c );
15977 uint8
__ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur,
uint c );
15978 uint16
__ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur,
uint c );
15980 long __ovld __conv intel_sub_group_shuffle_up(
long prev,
long cur,
uint c );
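A sketch of intel_sub_group_shuffle_down as declared above (cl_intel_subgroups). Each lane holds one element in cur and the element one sub-group width ahead in next; shuffling down by 2 then yields the value of element i+2 without another memory access. The kernel name is hypothetical, a linear lane-to-element mapping is assumed, and bounds handling at the end of the buffer is omitted.

__kernel void shift_left_by_two(__global const float *in, __global float *out) {
    size_t i  = get_global_id(0);
    uint  sg  = get_sub_group_size();
    float cur  = in[i];           /* element i, one element per lane */
    float next = in[i + sg];      /* element i + sub_group_size      */
    out[i] = intel_sub_group_shuffle_down(cur, next, 2u);   /* element i+2 */
}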
16007 uint __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
16008 uint2
__ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
16009 uint4
__ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
16010 uint8
__ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
16012 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16013 uint __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
16014 uint2
__ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
16015 uint4
__ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
16016 uint8
__ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
16017 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16020 uint2 __ovld __conv intel_sub_group_block_read2( const __global uint* p );
16021 uint4 __ovld __conv intel_sub_group_block_read4( const __global uint* p );
16022 uint8 __ovld __conv intel_sub_group_block_read8( const __global uint* p );
16024 void __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord,
uint data);
16025 void __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
16026 void __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
16027 void __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
16029 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16030 void __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
16031 void __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
16032 void __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
16033 void __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
16034 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16037 void __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
16038 void __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data );
16039 void __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
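A sketch of the sub-group block reads/writes above (cl_intel_subgroups): a single intel_sub_group_block_read2 loads 2 * sub_group_size consecutive uints for the whole sub-group (two per lane), and block_write2 stores them back. The pointer must be uniform across the sub-group and suitably aligned; the kernel name and indexing are hypothetical and assume full sub-groups.

__kernel void copy_uint2_blocks(__global const uint *in, __global uint *out) {
    size_t sg_index = get_group_id(0) * get_num_sub_groups() + get_sub_group_id();
    size_t base = sg_index * 2u * get_max_sub_group_size();
    uint2 v = intel_sub_group_block_read2(in + base);
    intel_sub_group_block_write2(out + base, v);
}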
16043 half
__ovld __conv intel_sub_group_shuffle_down( half prev, half cur,
uint c );
16044 half
__ovld __conv intel_sub_group_shuffle_up( half prev, half cur,
uint c );
16048 #if defined(cl_khr_fp64)
16050 double __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint c );
16051 double __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint c );
16055 #endif //cl_intel_subgroups
16057 #if defined(cl_intel_subgroups_short)
16058 short __ovld __conv intel_sub_group_broadcast( short x, uint sub_group_local_id );
16059 short2
__ovld __conv intel_sub_group_broadcast( short2 x,
uint sub_group_local_id );
16060 short3
__ovld __conv intel_sub_group_broadcast( short3 x,
uint sub_group_local_id );
16061 short4
__ovld __conv intel_sub_group_broadcast( short4 x,
uint sub_group_local_id );
16062 short8
__ovld __conv intel_sub_group_broadcast( short8 x,
uint sub_group_local_id );
16065 ushort2
__ovld __conv intel_sub_group_broadcast( ushort2 x,
uint sub_group_local_id );
16066 ushort3
__ovld __conv intel_sub_group_broadcast( ushort3 x,
uint sub_group_local_id );
16067 ushort4
__ovld __conv intel_sub_group_broadcast( ushort4 x,
uint sub_group_local_id );
16068 ushort8
__ovld __conv intel_sub_group_broadcast( ushort8 x,
uint sub_group_local_id );
16084 short __ovld __conv intel_sub_group_shuffle_down(
short cur,
short next,
uint c );
16085 short2
__ovld __conv intel_sub_group_shuffle_down( short2 cur, short2 next,
uint c );
16086 short3
__ovld __conv intel_sub_group_shuffle_down( short3 cur, short3 next,
uint c );
16087 short4
__ovld __conv intel_sub_group_shuffle_down( short4 cur, short4 next,
uint c );
16088 short8
__ovld __conv intel_sub_group_shuffle_down( short8 cur, short8 next,
uint c );
16089 short16
__ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next,
uint c );
16092 ushort2
__ovld __conv intel_sub_group_shuffle_down( ushort2 cur, ushort2 next,
uint c );
16093 ushort3
__ovld __conv intel_sub_group_shuffle_down( ushort3 cur, ushort3 next,
uint c );
16094 ushort4
__ovld __conv intel_sub_group_shuffle_down( ushort4 cur, ushort4 next,
uint c );
16095 ushort8
__ovld __conv intel_sub_group_shuffle_down( ushort8 cur, ushort8 next,
uint c );
16096 ushort16
__ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next,
uint c );
16098 short __ovld __conv intel_sub_group_shuffle_up(
short cur,
short next,
uint c );
16099 short2
__ovld __conv intel_sub_group_shuffle_up( short2 cur, short2 next,
uint c );
16100 short3
__ovld __conv intel_sub_group_shuffle_up( short3 cur, short3 next,
uint c );
16101 short4
__ovld __conv intel_sub_group_shuffle_up( short4 cur, short4 next,
uint c );
16102 short8
__ovld __conv intel_sub_group_shuffle_up( short8 cur, short8 next,
uint c );
16103 short16
__ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next,
uint c );
16106 ushort2
__ovld __conv intel_sub_group_shuffle_up( ushort2 cur, ushort2 next,
uint c );
16107 ushort3
__ovld __conv intel_sub_group_shuffle_up( ushort3 cur, ushort3 next,
uint c );
16108 ushort4
__ovld __conv intel_sub_group_shuffle_up( ushort4 cur, ushort4 next,
uint c );
16109 ushort8
__ovld __conv intel_sub_group_shuffle_up( ushort8 cur, ushort8 next,
uint c );
16110 ushort16
__ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next,
uint c );
16126 short __ovld __conv intel_sub_group_reduce_add(
short x );
16128 short __ovld __conv intel_sub_group_reduce_min(
short x );
16130 short __ovld __conv intel_sub_group_reduce_max(
short x );
16133 short __ovld __conv intel_sub_group_scan_exclusive_add(
short x );
16135 short __ovld __conv intel_sub_group_scan_exclusive_min(
short x );
16137 short __ovld __conv intel_sub_group_scan_exclusive_max(
short x );
16140 short __ovld __conv intel_sub_group_scan_inclusive_add(
short x );
16142 short __ovld __conv intel_sub_group_scan_inclusive_min(
short x );
16144 short __ovld __conv intel_sub_group_scan_inclusive_max(
short x );
16147 uint __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
16148 uint2
__ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
16149 uint4
__ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
16150 uint8
__ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
16152 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16153 uint __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
16154 uint2 __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
16155 uint4 __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
16156 uint8 __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
16157 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16160 uint2 __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
16161 uint4 __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
16162 uint8 __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
16164 void __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord,
uint data );
16165 void __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
16166 void __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
16167 void __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
16169 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16170 void __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
16171 void __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
16172 void __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
16173 void __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
16174 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16177 void __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
16178 void __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
16179 void __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
16181 ushort __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
16182 ushort2
__ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
16183 ushort4
__ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
16184 ushort8
__ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
16186 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16187 ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
16188 ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
16189 ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
16190 ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
16191 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16198 void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort data);
16199 void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
16200 void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
16201 void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
16203 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16204 void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort data);
16205 void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
16206 void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
16207 void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
16208 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
16211 void __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
16212 void __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, ushort4 data );
16213 void __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
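A sketch of the 16-bit sub-group block image transfers above (cl_intel_subgroups_short): each call moves one ushort per lane of a row segment. Following the byte_coord convention of the _ui variants, the x component is treated here as a byte offset into the row; that assumption, the kernel name and the launch geometry are all hypothetical.

__kernel void copy_row_us(read_only image2d_t src, write_only image2d_t dst) {
    /* x is assumed to be a byte offset: sub-group index * lanes * sizeof(ushort) */
    int x = (int)(get_sub_group_id() * get_max_sub_group_size() * (uint)sizeof(ushort));
    int y = (int)get_group_id(1);
    ushort v = intel_sub_group_block_read_us(src, (int2)(x, y));
    intel_sub_group_block_write_us(dst, (int2)(x, y), v);
}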
16214 #endif // cl_intel_subgroups_short
16216 #ifdef cl_intel_device_side_avc_motion_estimation
16217 #pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
16219 #define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
16220 #define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
16221 #define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
16222 #define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
16224 #define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
16225 #define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
16226 #define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
16227 #define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
16229 #define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
16230 #define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
16231 #define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
16233 #define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
16234 #define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
16235 #define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
16236 #define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
16237 #define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
16238 #define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
16239 #define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
16240 #define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
16242 #define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
16243 #define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
16244 #define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
16246 #define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
16247 #define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
16248 #define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
16249 #define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
16250 #define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
16251 #define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
16252 #define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
16253 #define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
16254 #define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
16256 #define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
16257 #define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
16259 #define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
16260 #define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
16261 #define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
16263 #define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
16264 #define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
16265 #define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
16266 #define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
16268 #define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
16269 #define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
16270 #define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
16271 #define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
16272 #define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
16274 #define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
16275 #define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
16276 #define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
16277 #define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
16279 #define CLK_AVC_ME_INTRA_16x16_INTEL 0x0
16280 #define CLK_AVC_ME_INTRA_8x8_INTEL 0x1
16281 #define CLK_AVC_ME_INTRA_4x4_INTEL 0x2
16283 #define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
16284 #define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
16286 #define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)
16287 #define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)
16288 #define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)
16289 #define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)
16290 #define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)
16291 #define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)
16292 #define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)
16293 #define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)
16294 #define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)
16295 #define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)
16296 #define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)
16297 #define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)
16298 #define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)
16299 #define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30)
16301 #define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
16302 #define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
16304 #define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0
16305 #define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
16306 #define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
16307 #define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
16309 #define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
16310 #define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
16311 #define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
16312 #define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
16314 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
16315 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
16316 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
16317 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
16318 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
16319 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
16320 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
16321 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
16322 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
16323 #define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
16324 #define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
16325 #define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
16326 #define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
16327 #define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
16329 #define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1
16330 #define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2
16331 #define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3
16333 #define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
16334 #define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
16336 #define CLK_AVC_ME_INITIALIZE_INTEL 0x0
16338 #define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0
16339 #define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0
16340 #define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0
16342 #define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0
16343 #define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0
16344 #define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0
16346 #define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
16347 #define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
16348 #define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
16349 #define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
16353 intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
16355 ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
16357 uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
16359 uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
16362 intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
16364 uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
16367 uint2
__ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
16368 uint2
__ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
16369 uint2
__ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
16370 uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
16372 intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
16374 intel_sub_group_avc_mce_payload_t
__ovld 16375 intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
16376 uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
16377 intel_sub_group_avc_mce_payload_t
__ovld 16378 intel_sub_group_avc_mce_set_inter_shape_penalty(
16379 ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
16380 intel_sub_group_avc_mce_payload_t
__ovld 16381 intel_sub_group_avc_mce_set_inter_direction_penalty(
16382 uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
16383 intel_sub_group_avc_mce_payload_t
__ovld 16384 intel_sub_group_avc_mce_set_motion_vector_cost_function(
16385 ulong packed_cost_center_delta, uint2 packed_cost_table,
16386 uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
16387 intel_sub_group_avc_mce_payload_t
__ovld 16388 intel_sub_group_avc_mce_set_ac_only_haar(
16389 intel_sub_group_avc_mce_payload_t payload);
16390 intel_sub_group_avc_mce_payload_t
__ovld 16391 intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
16392 uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
16393 intel_sub_group_avc_mce_payload_t
__ovld 16394 intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
16395 uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
16396 intel_sub_group_avc_mce_payload_t
__ovld 16397 intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
16398 uchar fwd_ref_field_polarity,
uchar bwd_ref_field_polarity,
16399 intel_sub_group_avc_mce_payload_t payload);
16401 ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
16402 intel_sub_group_avc_mce_result_t result);
16403 ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
16404 intel_sub_group_avc_mce_result_t result);
16405 ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
16406 intel_sub_group_avc_mce_result_t result);
16407 uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
16408 intel_sub_group_avc_mce_result_t result);
16409 uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
16410 intel_sub_group_avc_mce_result_t result);
16411 uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
16412 intel_sub_group_avc_mce_result_t result);
16413 uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
16414 intel_sub_group_avc_mce_result_t result);
16415 uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
16416 intel_sub_group_avc_mce_result_t result);
16418 intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
16419 uint packed_reference_ids,
uint packed_reference_parameter_field_polarities,
16420 intel_sub_group_avc_mce_result_t result);
16423 intel_sub_group_avc_ime_payload_t
__ovld 16424 intel_sub_group_avc_ime_initialize(
16425 ushort2 src_coord,
uchar partition_mask,
uchar sad_adjustment);
16426 intel_sub_group_avc_ime_payload_t
__ovld 16427 intel_sub_group_avc_ime_set_single_reference(
16428 short2 ref_offset,
uchar search_window_config,
16429 intel_sub_group_avc_ime_payload_t payload);
16430 intel_sub_group_avc_ime_payload_t
__ovld 16431 intel_sub_group_avc_ime_set_dual_reference(
16432 short2 fwd_ref_offset, short2 bwd_ref_offset,
uchar search_window_config,
16433 intel_sub_group_avc_ime_payload_t payload);
16434 intel_sub_group_avc_ime_payload_t
__ovld 16435 intel_sub_group_avc_ime_set_max_motion_vector_count(
16436 uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
16437 intel_sub_group_avc_ime_payload_t
__ovld 16438 intel_sub_group_avc_ime_set_unidirectional_mix_disable(
16439 intel_sub_group_avc_ime_payload_t payload);
16440 intel_sub_group_avc_ime_payload_t
__ovld 16441 intel_sub_group_avc_ime_set_early_search_termination_threshold(
16442 uchar threshold, intel_sub_group_avc_ime_payload_t payload);
16443 intel_sub_group_avc_ime_payload_t
__ovld 16444 intel_sub_group_avc_ime_set_weighted_sad(
16445 uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
__attribute__((deprecated("If you use the latest Intel driver, please use "
                          "intel_sub_group_avc_ime_ref_window_size instead",
                          "intel_sub_group_avc_ime_ref_window_size")))
ushort2 __ovld intel_sub_group_ime_ref_window_size(
    uchar search_window_config, char dual_ref);
16452 ushort2
__ovld intel_sub_group_avc_ime_ref_window_size(
16453 uchar search_window_config,
char dual_ref);
16454 short2
__ovld intel_sub_group_avc_ime_adjust_ref_offset(
16455 short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
16456 ushort2 image_size);
16458 intel_sub_group_avc_ime_result_t
__ovld 16459 intel_sub_group_avc_ime_evaluate_with_single_reference(
16460 read_only image2d_t src_image, read_only image2d_t ref_image,
16461 sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
16462 intel_sub_group_avc_ime_result_t
__ovld 16463 intel_sub_group_avc_ime_evaluate_with_dual_reference(
16464 read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
16465 read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
16466 intel_sub_group_avc_ime_payload_t payload);
16467 intel_sub_group_avc_ime_result_single_reference_streamout_t
__ovld 16468 intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
16469 read_only image2d_t src_image, read_only image2d_t ref_image,
16470 sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
16471 intel_sub_group_avc_ime_result_dual_reference_streamout_t
__ovld 16472 intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
16473 read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
16474 read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
16475 intel_sub_group_avc_ime_payload_t payload);
16476 intel_sub_group_avc_ime_result_t
__ovld 16477 intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
16478 read_only image2d_t src_image, read_only image2d_t ref_image,
16479 sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
16480 intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
16481 intel_sub_group_avc_ime_result_t
__ovld 16482 intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
16483 read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
16484 read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
16485 intel_sub_group_avc_ime_payload_t payload,
16486 intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
16487 intel_sub_group_avc_ime_result_single_reference_streamout_t
__ovld 16488 intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
16489 read_only image2d_t src_image, read_only image2d_t ref_image,
16490 sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
16491 intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
16492 intel_sub_group_avc_ime_result_dual_reference_streamout_t
__ovld 16493 intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
16494 read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
16495 read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
16496 intel_sub_group_avc_ime_payload_t payload,
16497 intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
16499 intel_sub_group_avc_ime_single_reference_streamin_t
__ovld 16500 intel_sub_group_avc_ime_get_single_reference_streamin(
16501 intel_sub_group_avc_ime_result_single_reference_streamout_t result);
16502 intel_sub_group_avc_ime_dual_reference_streamin_t
__ovld 16503 intel_sub_group_avc_ime_get_dual_reference_streamin(
16504 intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
16505 intel_sub_group_avc_ime_result_t
__ovld 16506 intel_sub_group_avc_ime_strip_single_reference_streamout(
16507 intel_sub_group_avc_ime_result_single_reference_streamout_t result);
16508 intel_sub_group_avc_ime_result_t
__ovld 16509 intel_sub_group_avc_ime_strip_dual_reference_streamout(
16510 intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
16512 uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
16513 intel_sub_group_avc_ime_result_single_reference_streamout_t result,
16514 uchar major_shape);
16515 ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
16516 intel_sub_group_avc_ime_result_single_reference_streamout_t result,
16517 uchar major_shape);
16518 uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
16519 intel_sub_group_avc_ime_result_single_reference_streamout_t result,
16520 uchar major_shape);
16521 uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
16522 intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
16524 ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
16525 intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
16527 uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
16528 intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
16531 uchar __ovld intel_sub_group_avc_ime_get_border_reached(
16532 uchar image_select, intel_sub_group_avc_ime_result_t result);
16533 uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
16534 intel_sub_group_avc_ime_result_t result);
16536 intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
16537 intel_sub_group_avc_ime_result_t result);
16538 uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
16539 intel_sub_group_avc_ime_result_t result);
16540 ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
16541 intel_sub_group_avc_ime_result_t result);
16544 intel_sub_group_avc_ref_payload_t
__ovld 16545 intel_sub_group_avc_fme_initialize(
16546 ushort2 src_coord,
ulong motion_vectors,
uchar major_shapes,
16548 uchar sad_adjustment);
16549 intel_sub_group_avc_ref_payload_t
__ovld 16550 intel_sub_group_avc_bme_initialize(
16551 ushort2 src_coord,
ulong motion_vectors,
uchar major_shapes,
16553 uchar bidirectional_weight,
uchar sad_adjustment);
16555 intel_sub_group_avc_ref_payload_t
__ovld 16556 intel_sub_group_avc_ref_set_bidirectional_mix_disable(
16557 intel_sub_group_avc_ref_payload_t payload);
16558 intel_sub_group_avc_ref_payload_t
__ovld 16559 intel_sub_group_avc_ref_set_bilinear_filter_enable(
16560 intel_sub_group_avc_ref_payload_t payload);
16562 intel_sub_group_avc_ref_result_t
__ovld 16563 intel_sub_group_avc_ref_evaluate_with_single_reference(
16564 read_only image2d_t src_image, read_only image2d_t ref_image,
16565 sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
16566 intel_sub_group_avc_ref_result_t
__ovld 16567 intel_sub_group_avc_ref_evaluate_with_dual_reference(
16568 read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
16569 read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
16570 intel_sub_group_avc_ref_payload_t payload);
16571 intel_sub_group_avc_ref_result_t
__ovld 16572 intel_sub_group_avc_ref_evaluate_with_multi_reference(
16573 read_only image2d_t src_image,
uint packed_reference_ids,
16574 sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
16575 intel_sub_group_avc_ref_result_t
__ovld 16576 intel_sub_group_avc_ref_evaluate_with_multi_reference(
16577 read_only image2d_t src_image,
uint packed_reference_ids,
16578 uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
16579 intel_sub_group_avc_ref_payload_t payload);
16582 intel_sub_group_avc_sic_payload_t
__ovld 16583 intel_sub_group_avc_sic_initialize(
16584 ushort2 src_coord);
16585 intel_sub_group_avc_sic_payload_t
__ovld 16586 intel_sub_group_avc_sic_configure_skc(
16587 uint skip_block_partition_type,
uint skip_motion_vector_mask,
16588 ulong motion_vectors,
uchar bidirectional_weight,
uchar skip_sad_adjustment,
16589 intel_sub_group_avc_sic_payload_t payload);
16590 intel_sub_group_avc_sic_payload_t
__ovld 16591 intel_sub_group_avc_sic_configure_ipe(
16592 uchar luma_intra_partition_mask,
uchar intra_neighbour_availabilty,
16593 uchar left_edge_luma_pixels,
uchar upper_left_corner_luma_pixel,
16594 uchar upper_edge_luma_pixels,
uchar upper_right_edge_luma_pixels,
16595 uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
16596 intel_sub_group_avc_sic_payload_t
__ovld 16597 intel_sub_group_avc_sic_configure_ipe(
16598 uchar luma_intra_partition_mask,
uchar intra_neighbour_availabilty,
16599 uchar left_edge_luma_pixels,
uchar upper_left_corner_luma_pixel,
16600 uchar upper_edge_luma_pixels,
uchar upper_right_edge_luma_pixels,
16601 ushort left_edge_chroma_pixels,
ushort upper_left_corner_chroma_pixel,
16602 ushort upper_edge_chroma_pixels,
uchar intra_sad_adjustment,
16603 intel_sub_group_avc_sic_payload_t payload);
16605 intel_sub_group_avc_sic_get_motion_vector_mask(
16606 uint skip_block_partition_type,
uchar direction);
16608 intel_sub_group_avc_sic_payload_t
__ovld 16609 intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
16610 uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
16611 intel_sub_group_avc_sic_payload_t
__ovld 16612 intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
16613 uchar luma_mode_penalty,
uint luma_packed_neighbor_modes,
16614 uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
16615 intel_sub_group_avc_sic_payload_t
__ovld 16616 intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
16617 uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
16619 intel_sub_group_avc_sic_payload_t
__ovld 16620 intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
16621 intel_sub_group_avc_sic_payload_t payload);
16622 intel_sub_group_avc_sic_payload_t
__ovld 16623 intel_sub_group_avc_sic_set_skc_forward_transform_enable(
16624 ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
16625 intel_sub_group_avc_sic_payload_t
__ovld 16626 intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
16627 uchar block_based_skip_type,
16628 intel_sub_group_avc_sic_payload_t payload);
16630 intel_sub_group_avc_sic_result_t
__ovld 16631 intel_sub_group_avc_sic_evaluate_ipe(
16632 read_only image2d_t src_image, sampler_t vme_media_sampler,
16633 intel_sub_group_avc_sic_payload_t payload);
16634 intel_sub_group_avc_sic_result_t
__ovld 16635 intel_sub_group_avc_sic_evaluate_with_single_reference(
16636 read_only image2d_t src_image, read_only image2d_t ref_image,
16637 sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
16638 intel_sub_group_avc_sic_result_t
__ovld 16639 intel_sub_group_avc_sic_evaluate_with_dual_reference(
16640 read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
16641 read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
16642 intel_sub_group_avc_sic_payload_t payload);
16643 intel_sub_group_avc_sic_result_t
__ovld 16644 intel_sub_group_avc_sic_evaluate_with_multi_reference(
16645 read_only image2d_t src_image,
uint packed_reference_ids,
16646 sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
16647 intel_sub_group_avc_sic_result_t
__ovld 16648 intel_sub_group_avc_sic_evaluate_with_multi_reference(
16649 read_only image2d_t src_image,
uint packed_reference_ids,
16650 uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
16651 intel_sub_group_avc_sic_payload_t payload);
16653 uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
16654 intel_sub_group_avc_sic_result_t result);
16655 ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
16656 intel_sub_group_avc_sic_result_t result);
16657 ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
16658 intel_sub_group_avc_sic_result_t result);
16659 ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
16660 intel_sub_group_avc_sic_result_t result);
16661 uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
16662 intel_sub_group_avc_sic_result_t result);
16663 uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
16664 intel_sub_group_avc_sic_result_t result);
16665 ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
16666 intel_sub_group_avc_sic_result_t result);
16667 ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
16668 intel_sub_group_avc_sic_result_t result);
16671 intel_sub_group_avc_ime_payload_t
__ovld 16672 intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
16673 uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
16674 intel_sub_group_avc_ref_payload_t
__ovld 16675 intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
16676 uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
16677 intel_sub_group_avc_sic_payload_t
__ovld 16678 intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
16679 uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
16681 intel_sub_group_avc_ime_payload_t
__ovld 16682 intel_sub_group_avc_ime_set_inter_shape_penalty(
16683 ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
16684 intel_sub_group_avc_ref_payload_t
__ovld 16685 intel_sub_group_avc_ref_set_inter_shape_penalty(
16686 ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
16687 intel_sub_group_avc_sic_payload_t
__ovld 16688 intel_sub_group_avc_sic_set_inter_shape_penalty(
16689 ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
16691 intel_sub_group_avc_ime_payload_t
__ovld 16692 intel_sub_group_avc_ime_set_inter_direction_penalty(
16693 uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
16694 intel_sub_group_avc_ref_payload_t
__ovld 16695 intel_sub_group_avc_ref_set_inter_direction_penalty(
16696 uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
16697 intel_sub_group_avc_sic_payload_t
__ovld 16698 intel_sub_group_avc_sic_set_inter_direction_penalty(
16699 uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
16701 intel_sub_group_avc_ime_payload_t
__ovld 16702 intel_sub_group_avc_ime_set_motion_vector_cost_function(
16703 ulong packed_cost_center_delta, uint2 packed_cost_table,
16704 uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
16705 intel_sub_group_avc_ref_payload_t
__ovld 16706 intel_sub_group_avc_ref_set_motion_vector_cost_function(
16707 ulong packed_cost_center_delta, uint2 packed_cost_table,
16708 uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
16709 intel_sub_group_avc_sic_payload_t
__ovld 16710 intel_sub_group_avc_sic_set_motion_vector_cost_function(
16711 ulong packed_cost_center_delta, uint2 packed_cost_table,
16712 uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
16714 intel_sub_group_avc_ime_payload_t
__ovld 16715 intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
16716 uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
16717 intel_sub_group_avc_ref_payload_t
__ovld 16718 intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
16719 uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
16720 intel_sub_group_avc_sic_payload_t
__ovld 16721 intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
16722 uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
16724 intel_sub_group_avc_ime_payload_t
__ovld 16725 intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
16726 uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
16727 intel_sub_group_avc_ref_payload_t
__ovld 16728 intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
16729 uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
16730 intel_sub_group_avc_sic_payload_t
__ovld 16731 intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
16732 uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
16733 intel_sub_group_avc_ime_payload_t
__ovld 16734 intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
16735 uchar fwd_ref_field_polarity,
uchar bwd_ref_field_polarity,
16736 intel_sub_group_avc_ime_payload_t payload);
16737 intel_sub_group_avc_ref_payload_t
__ovld 16738 intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
16739 uchar fwd_ref_field_polarity,
uchar bwd_ref_field_polarity,
16740 intel_sub_group_avc_ref_payload_t payload);
16741 intel_sub_group_avc_sic_payload_t
__ovld 16742 intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
16743 uchar fwd_ref_field_polarity,
uchar bwd_ref_field_polarity,
16744 intel_sub_group_avc_sic_payload_t payload);
16746 intel_sub_group_avc_ime_payload_t
__ovld 16747 intel_sub_group_avc_ime_set_ac_only_haar(
16748 intel_sub_group_avc_ime_payload_t payload);
16749 intel_sub_group_avc_ref_payload_t
__ovld 16750 intel_sub_group_avc_ref_set_ac_only_haar(
16751 intel_sub_group_avc_ref_payload_t payload);
16752 intel_sub_group_avc_sic_payload_t
__ovld 16753 intel_sub_group_avc_sic_set_ac_only_haar(
16754 intel_sub_group_avc_sic_payload_t payload);
16756 ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
16757 intel_sub_group_avc_ime_result_t result);
16758 ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
16759 intel_sub_group_avc_ref_result_t result);
16761 ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
16762 intel_sub_group_avc_ime_result_t result);
16763 ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
16764 intel_sub_group_avc_ref_result_t result);
16765 ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
16766 intel_sub_group_avc_sic_result_t result);
16768 ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
16769 intel_sub_group_avc_ime_result_t result);
16770 ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
16771 intel_sub_group_avc_ref_result_t result);
16773 uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
16774 intel_sub_group_avc_ime_result_t result);
16775 uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
16776 intel_sub_group_avc_ref_result_t result);
16777 uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
16778 intel_sub_group_avc_ime_result_t result);
16779 uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
16780 intel_sub_group_avc_ref_result_t result);
16782 uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
16783 intel_sub_group_avc_ime_result_t result);
16784 uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
16785 intel_sub_group_avc_ref_result_t result);
16787 uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
16788 intel_sub_group_avc_ime_result_t result);
16789 uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
16790 intel_sub_group_avc_ref_result_t result);
16792 uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
16793 intel_sub_group_avc_ime_result_t result);
16794 uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
16795 intel_sub_group_avc_ref_result_t result);
16798 intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
16799 uint packed_reference_ids,
uint packed_reference_parameter_field_polarities,
16800 intel_sub_group_avc_ime_result_t result);
16802 intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
16803 uint packed_reference_ids,
uint packed_reference_parameter_field_polarities,
16804 intel_sub_group_avc_ref_result_t result);
16807 intel_sub_group_avc_mce_payload_t
__ovld 16808 intel_sub_group_avc_ime_convert_to_mce_payload(
16809 intel_sub_group_avc_ime_payload_t payload);
16810 intel_sub_group_avc_ime_payload_t
__ovld 16811 intel_sub_group_avc_mce_convert_to_ime_payload(
16812 intel_sub_group_avc_mce_payload_t payload);
16813 intel_sub_group_avc_mce_payload_t
__ovld 16814 intel_sub_group_avc_ref_convert_to_mce_payload(
16815 intel_sub_group_avc_ref_payload_t payload);
16816 intel_sub_group_avc_ref_payload_t
__ovld 16817 intel_sub_group_avc_mce_convert_to_ref_payload(
16818 intel_sub_group_avc_mce_payload_t payload);
16819 intel_sub_group_avc_mce_payload_t
__ovld 16820 intel_sub_group_avc_sic_convert_to_mce_payload(
16821 intel_sub_group_avc_sic_payload_t payload);
16822 intel_sub_group_avc_sic_payload_t
__ovld 16823 intel_sub_group_avc_mce_convert_to_sic_payload(
16824 intel_sub_group_avc_mce_payload_t payload);
16826 intel_sub_group_avc_mce_result_t
__ovld 16827 intel_sub_group_avc_ime_convert_to_mce_result(
16828 intel_sub_group_avc_ime_result_t result);
16829 intel_sub_group_avc_ime_result_t
__ovld 16830 intel_sub_group_avc_mce_convert_to_ime_result(
16831 intel_sub_group_avc_mce_result_t result);
16832 intel_sub_group_avc_mce_result_t
__ovld 16833 intel_sub_group_avc_ref_convert_to_mce_result(
16834 intel_sub_group_avc_ref_result_t result);
16835 intel_sub_group_avc_ref_result_t
__ovld 16836 intel_sub_group_avc_mce_convert_to_ref_result(
16837 intel_sub_group_avc_mce_result_t result);
16838 intel_sub_group_avc_mce_result_t
__ovld 16839 intel_sub_group_avc_sic_convert_to_mce_result(
16840 intel_sub_group_avc_sic_result_t result);
16841 intel_sub_group_avc_sic_result_t
__ovld 16842 intel_sub_group_avc_mce_convert_to_sic_result(
16843 intel_sub_group_avc_mce_result_t result);
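/*
 * Illustrative sketch only (not part of the header): one way to drive the IME
 * entry points declared above for a single-reference integer motion search.
 * Assumes one 16x16 macroblock per 16-wide work-group, a forward reference,
 * and a VME media sampler created on the host; kernel and buffer names, the
 * search-window choice and the partition mask are assumptions for the example.
 */
__attribute__((reqd_work_group_size(16, 1, 1)))
__kernel void ime_single_ref_example(read_only image2d_t src_img,
                                     read_only image2d_t ref_img,
                                     sampler_t vme_sampler,
                                     __global ulong *motion_vectors) {
  ushort2 src_coord = (ushort2)((ushort)(get_group_id(0) * 16),
                                (ushort)(get_group_id(1) * 16));
  intel_sub_group_avc_ime_payload_t payload = intel_sub_group_avc_ime_initialize(
      src_coord, CLK_AVC_ME_PARTITION_MASK_ALL_INTEL,
      CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL);
  payload = intel_sub_group_avc_ime_set_single_reference(
      (short2)(0, 0), CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL, payload);
  intel_sub_group_avc_ime_result_t result =
      intel_sub_group_avc_ime_evaluate_with_single_reference(
          src_img, ref_img, vme_sampler, payload);
  ulong mvs = intel_sub_group_avc_ime_get_motion_vectors(result);
  /* one work-item per macroblock writes the packed motion vectors back */
  if (get_local_id(0) == 0)
    motion_vectors[get_group_id(1) * get_num_groups(0) + get_group_id(0)] = mvs;
}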
#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
#endif // cl_intel_device_side_avc_motion_estimation

#ifdef cl_amd_media_ops
uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
16850 uint3
__ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
16851 uint4
__ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
16852 uint8
__ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
16853 uint16
__ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
16856 uint2
__ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
16857 uint3
__ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
16858 uint4
__ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
16859 uint8
__ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
16860 uint16
__ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
16863 uint2
__ovld amd_lerp(uint2 a, uint2 b, uint2 c);
16864 uint3
__ovld amd_lerp(uint3 a, uint3 b, uint3 c);
16865 uint4
__ovld amd_lerp(uint4 a, uint4 b, uint4 c);
16866 uint8
__ovld amd_lerp(uint8 a, uint8 b, uint8 c);
16867 uint16
__ovld amd_lerp(uint16 a, uint16 b, uint16 c);
16874 uint2
__ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
16875 uint3
__ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
16876 uint4
__ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
16877 uint8
__ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
16878 uint16
__ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
16881 uint2
__ovld amd_sad(uint2 a, uint2 b, uint2 c);
16882 uint3
__ovld amd_sad(uint3 a, uint3 b, uint3 c);
16883 uint4
__ovld amd_sad(uint4 a, uint4 b, uint4 c);
16884 uint8
__ovld amd_sad(uint8 a, uint8 b, uint8 c);
16885 uint16
__ovld amd_sad(uint16 a, uint16 b, uint16 c);
16888 float2
__ovld amd_unpack0(uint2 a);
16889 float3
__ovld amd_unpack0(uint3 a);
16890 float4
__ovld amd_unpack0(uint4 a);
16891 float8
__ovld amd_unpack0(uint8 a);
16892 float16
__ovld amd_unpack0(uint16 a);
16895 float2
__ovld amd_unpack1(uint2 a);
16896 float3
__ovld amd_unpack1(uint3 a);
16897 float4
__ovld amd_unpack1(uint4 a);
16898 float8
__ovld amd_unpack1(uint8 a);
16899 float16
__ovld amd_unpack1(uint16 a);
16902 float2
__ovld amd_unpack2(uint2 a);
16903 float3
__ovld amd_unpack2(uint3 a);
16904 float4
__ovld amd_unpack2(uint4 a);
16905 float8
__ovld amd_unpack2(uint8 a);
16906 float16
__ovld amd_unpack2(uint16 a);
16909 float2
__ovld amd_unpack3(uint2 a);
16910 float3
__ovld amd_unpack3(uint3 a);
16911 float4
__ovld amd_unpack3(uint4 a);
16912 float8
__ovld amd_unpack3(uint8 a);
16913 float16
__ovld amd_unpack3(uint16 a);
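/*
 * Illustrative sketch (assumption: the device exposes cl_amd_media_ops).  amd_sad
 * is assumed to accumulate the per-byte sum of absolute differences of its first
 * two packed arguments into the third; here each work-item compares four packed
 * 8-bit-per-channel pixels.  Kernel and buffer names are made up for the example.
 */
__kernel void packed_sad_example(__global const uint *a, __global const uint *b,
                                 __global uint4 *out) {
  size_t i = get_global_id(0);
  uint4 pa = vload4(i, a);                /* four pixels, 4 bytes each        */
  uint4 pb = vload4(i, b);
  out[i] = amd_sad(pa, pb, (uint4)(0));   /* byte-wise SAD, zero accumulator  */
}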
#endif // cl_amd_media_ops

#ifdef cl_amd_media_ops2
int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
16919 int3
__ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
16920 int4
__ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
16921 int8
__ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
16922 int16
__ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
16925 uint2
__ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
16926 uint3
__ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
16927 uint4
__ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
16928 uint8
__ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
16929 uint16
__ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
16932 uint2
__ovld amd_bfm(uint2 src0, uint2 src1);
16933 uint3
__ovld amd_bfm(uint3 src0, uint3 src1);
16934 uint4
__ovld amd_bfm(uint4 src0, uint4 src1);
16935 uint8
__ovld amd_bfm(uint8 src0, uint8 src1);
16936 uint16
__ovld amd_bfm(uint16 src0, uint16 src1);
16938 float __ovld amd_max3(
float src0,
float src1,
float src2);
16939 float2
__ovld amd_max3(float2 src0, float2 src1, float2 src2);
16940 float3
__ovld amd_max3(float3 src0, float3 src1, float3 src2);
16941 float4
__ovld amd_max3(float4 src0, float4 src1, float4 src2);
16942 float8
__ovld amd_max3(float8 src0, float8 src1, float8 src2);
16943 float16
__ovld amd_max3(float16 src0, float16 src1, float16 src2);
16945 int __ovld amd_max3(
int src0,
int src1,
int src2);
16946 int2
__ovld amd_max3(int2 src0, int2 src1, int2 src2);
16947 int3
__ovld amd_max3(int3 src0, int3 src1, int3 src2);
16948 int4
__ovld amd_max3(int4 src0, int4 src1, int4 src2);
16949 int8
__ovld amd_max3(int8 src0, int8 src1, int8 src2);
16950 int16
__ovld amd_max3(int16 src0, int16 src1, int16 src2);
16953 uint2
__ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
16954 uint3
__ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
16955 uint4
__ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
16956 uint8
__ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
16957 uint16
__ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
16959 float __ovld amd_median3(
float src0,
float src1,
float src2);
16960 float2
__ovld amd_median3(float2 src0, float2 src1, float2 src2);
16961 float3
__ovld amd_median3(float3 src0, float3 src1, float3 src2);
16962 float4
__ovld amd_median3(float4 src0, float4 src1, float4 src2);
16963 float8
__ovld amd_median3(float8 src0, float8 src1, float8 src2);
16964 float16
__ovld amd_median3(float16 src0, float16 src1, float16 src2);
16966 int __ovld amd_median3(
int src0,
int src1,
int src2);
16967 int2
__ovld amd_median3(int2 src0, int2 src1, int2 src2);
16968 int3
__ovld amd_median3(int3 src0, int3 src1, int3 src2);
16969 int4
__ovld amd_median3(int4 src0, int4 src1, int4 src2);
16970 int8
__ovld amd_median3(int8 src0, int8 src1, int8 src2);
16971 int16
__ovld amd_median3(int16 src0, int16 src1, int16 src2);
16974 uint2
__ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
16975 uint3
__ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
16976 uint4
__ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
16977 uint8
__ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
16978 uint16
__ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
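/*
 * Illustrative sketch (assumption: cl_amd_media_ops2 is available).  With a fixed
 * lower and upper bound, amd_median3 behaves like a component-wise clamp, since
 * the median of {x, lo, hi} equals clamp(x, lo, hi) whenever lo <= hi.  Names
 * below are made up for the example.
 */
__kernel void clamp_with_median3(__global int4 *data, int4 lo, int4 hi) {
  size_t i = get_global_id(0);
  data[i] = amd_median3(data[i], lo, hi);  /* component-wise clamp to [lo, hi] */
}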
16980 float __ovld amd_min3(
float src0,
float src1,
float src);
16981 float2
__ovld amd_min3(float2 src0, float2 src1, float2 src);
16982 float3
__ovld amd_min3(float3 src0, float3 src1, float3 src);
16983 float4
__ovld amd_min3(float4 src0, float4 src1, float4 src);
16984 float8
__ovld amd_min3(float8 src0, float8 src1, float8 src);
16985 float16
__ovld amd_min3(float16 src0, float16 src1, float16 src);
16987 int __ovld amd_min3(
int src0,
int src1,
int src2);
16988 int2
__ovld amd_min3(int2 src0, int2 src1, int2 src2);
16989 int3
__ovld amd_min3(int3 src0, int3 src1, int3 src2);
16990 int4
__ovld amd_min3(int4 src0, int4 src1, int4 src2);
16991 int8
__ovld amd_min3(int8 src0, int8 src1, int8 src2);
16992 int16
__ovld amd_min3(int16 src0, int16 src1, int16 src2);
16995 uint2
__ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
16996 uint3
__ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
16997 uint4
__ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
16998 uint8
__ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
16999 uint16
__ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
17002 ulong2
__ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
17003 ulong3
__ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
17004 ulong4
__ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
17005 ulong8
__ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
17006 ulong16
__ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
17009 ulong2
__ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
17010 ulong3
__ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
17011 ulong4
__ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
17012 ulong8
__ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
17013 ulong16
__ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
17016 uint2
__ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
17017 uint3
__ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
17018 uint4
__ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
17019 uint8
__ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
17020 uint16
__ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
17023 uint2
__ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
17024 uint3
__ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
17025 uint4
__ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
17026 uint8
__ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
17027 uint16
__ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
17030 uint2
__ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
17031 uint3
__ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
17032 uint4
__ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
17033 uint8
__ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
17034 uint16
__ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
#endif // cl_amd_media_ops2

#pragma OPENCL EXTENSION all : disable

#endif //_OPENCL_H_

uchar16 __ovld __cnfn convert_uchar16_rtn(char16)
void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order)
ushort __ovld __cnfn convert_ushort_sat_rtn(char)
uchar __ovld __cnfn convert_uchar_sat_rtz(char)
int16 __ovld __cnfn convert_int16_sat_rtp(char16)
short4 __ovld __cnfn convert_short4_rtp(char4)
float __ovld __cnfn logb(float x)
Compute the exponent of x, which is the integral part of log_r |x|.
short16 __ovld __cnfn convert_short16_sat_rtp(char16)
float __ovld __cnfn erfc(float)
Complementary error function.
void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p)
uchar4 __ovld __cnfn convert_uchar4_rte(char4)
float __ovld __cnfn tanh(float)
Compute hyperbolic tangent.
uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16)
ulong8 __ovld __cnfn convert_ulong8_rtz(char8)
float __ovld __cnfn minmag(float x, float y)
Returns x if |x| < |y|, y if |y| < |x|, otherwise fmin(x, y).
long4 __ovld __cnfn convert_long4_sat_rtz(char4)
float __ovld __cnfn half_divide(float x, float y)
Compute x / y.
ushort2 __ovld __cnfn convert_ushort2_rtp(char2)
long2 __ovld __cnfn convert_long2_sat_rtz(char2)
uint3 __ovld __cnfn convert_uint3(char3)
int2 __ovld __cnfn get_image_dim(read_only image2d_t image)
Return the 2D image width and height as an int2 type.
short3 __ovld __cnfn convert_short3_sat_rte(char3)
long __ovld __cnfn convert_long_sat_rte(char)
int16 __ovld __cnfn convert_int16_sat(char16)
float __ovld __cnfn trunc(float)
Round to integral value using the round to zero rounding mode.
int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order)
short16 __ovld __cnfn convert_short16_rte(char16)
uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4)
ulong8 __ovld __cnfn convert_ulong8_rte(char8)
float __ovld __cnfn cospi(float x)
Compute cos (PI * x).
short16 __ovld __cnfn convert_short16_sat_rtn(char16)
float __ovld __cnfn remainder(float x, float y)
Compute the value r such that r = x - n*y, where n is the integer nearest the exact value of x/y...
float3 __ovld __cnfn convert_float3_rtn(char3)
void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color)
float __ovld __cnfn fmod(float x, float y)
Modulus.
float __ovld __cnfn native_rsqrt(float x)
Compute inverse square root over an implementation-defined range.
float __ovld __cnfn native_exp(float x)
Compute the base-e exponential of x over an implementation-defined range.
char2 __ovld __cnfn convert_char2_rtz(char2)
int __ovld __cnfn convert_int_rte(char)
ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4)
void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p)
uint3 __ovld __cnfn convert_uint3_sat_rtp(char3)
uint8 __ovld __cnfn convert_uint8_rtn(char8)
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected, int desired, memory_order success, memory_order failure)
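A minimal sketch of the usual retry loop built on the weak compare-exchange above (assumes OpenCL C 2.0 atomics; the kernel name and its saturation policy are illustrative only, and v is assumed non-negative):
__kernel void saturating_add(volatile __global atomic_int *counter, int v) {
  /* v >= 0 is assumed so the overflow check below is well defined */
  int old = atomic_load_explicit(counter, memory_order_relaxed);
  int desired;
  do {
    desired = (old > INT_MAX - v) ? INT_MAX : old + v;   /* clamp instead of wrapping */
  } while (!atomic_compare_exchange_weak_explicit(counter, &old, desired,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed));
  /* on failure the weak CAS reloads *counter into old, so the loop simply retries */
}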
void __ovld vstore_half8(float8 data, size_t offset, half *p)
uchar2 __ovld __cnfn convert_uchar2_sat(char2)
uint8 __ovld __cnfn convert_uint8_sat(char8)
void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order)
short2 __ovld __cnfn convert_short2_sat_rtz(char2)
__SIZE_TYPE__ size_t
The unsigned integer type of the result of the sizeof operator.
ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3)
int3 __ovld __cnfn convert_int3_sat_rtz(char3)
long4 __ovld __cnfn convert_long4_rtz(char4)
float __ovld __cnfn ceil(float)
Round to integral value using the round to positive infinity rounding mode.
ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4)
uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3)
long8 __ovld __cnfn convert_long8_rtn(char8)
int __ovld __cnfn get_image_width(read_only image1d_t image)
Return the image width in pixels.
char3 __ovld __cnfn convert_char3(char3)
float3 __ovld vload_half3(size_t offset, const __constant half *p)
int __ovld __cnfn mul24(int x, int y)
Multiply two 24-bit integer values x and y.
uchar __ovld __cnfn convert_uchar_rtz(char)
ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16)
int __ovld atomic_or(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p)
ushort __ovld __cnfn convert_ushort_rtn(char)
int __ovld __cnfn all(char x)
Returns 1 if the most significant bit in all components of x is set; otherwise returns 0...
float __ovld __cnfn native_powr(float x, float y)
Compute x to the power y, where x is >= 0.
char __ovld __cnfn convert_char_sat_rte(char)
void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p)
float __ovld __cnfn tgamma(float)
Compute the gamma function.
uint3 __ovld __cnfn convert_uint3_rte(char3)
ulong __ovld __cnfn convert_ulong_sat_rtp(char)
char4 __ovld __cnfn convert_char4_rtn(char4)
bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired)
long16 __ovld __cnfn convert_long16_sat_rtz(char16)
int __ovld __cnfn isgreater(float x, float y)
Returns the component-wise compare of x > y.
uchar3 __ovld __cnfn convert_uchar3_rtn(char3)
uint16 __ovld __cnfn convert_uint16_sat(char16)
ulong2 __ovld __cnfn convert_ulong2(char2)
short3 __ovld __cnfn convert_short3_sat_rtn(char3)
int __ovld __cnfn signbit(float)
Test for sign bit.
uint __ovld __cnfn convert_uint_sat_rte(char)
ushort3 __ovld __cnfn convert_ushort3_rte(char3)
int4 __ovld __cnfn convert_int4_sat_rtn(char4)
long __ovld __cnfn convert_long_rtp(char)
long3 __ovld __cnfn convert_long3_sat_rtz(char3)
float __ovld __cnfn tanpi(float x)
Compute tan (PI * x).
void __ovld vstorea_half_rtz(float data, size_t offset, half *p)
float8 __ovld __cnfn convert_float8_rte(char8)
float16 __ovld __cnfn convert_float16_rtz(char16)
char4 __ovld __cnfn convert_char4_sat_rtz(char4)
float __ovld __cnfn expm1(float x)
Compute e^x - 1.0.
int __ovld atomic_fetch_add(volatile atomic_int *object, int operand)
queue_t __ovld get_default_queue(void)
int __ovld __conv work_group_scan_inclusive_min(int x)
char16 __ovld vload16(size_t offset, const __constant char *p)
float __ovld __cnfn pown(float x, int y)
Compute x to the power y, where y is an integer.
ulong __ovld __cnfn convert_ulong_rtz(char)
float __ovld __cnfn fmax(float x, float y)
Returns y if x < y, otherwise it returns x.
float __ovld __cnfn nextafter(float x, float y)
Computes the next representable single-precision floating-point value following x in the direction of...
float __ovld __cnfn atan(float y_over_x)
Arc tangent function.
ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16)
int2 __ovld __cnfn convert_int2_sat_rtp(char2)
int16 __ovld __cnfn convert_int16_rtp(char16)
uint3 __ovld __cnfn convert_uint3_rtn(char3)
long16 __ovld __cnfn convert_long16_sat(char16)
int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand)
int __ovld __conv work_group_scan_exclusive_min(int x)
uchar __ovld __cnfn convert_uchar_sat_rtp(char)
float __ovld __cnfn native_tan(float x)
Compute tangent over an implementation-defined range.
ushort3 __ovld __cnfn convert_ushort3_rtz(char3)
uchar3 __ovld __cnfn convert_uchar3_rtp(char3)
float __ovld __cnfn asinpi(float x)
Compute asin (x) / PI.
int __ovld atomic_min(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2)
char8 __ovld __cnfn convert_char8_rtp(char8)
float __ovld __cnfn cbrt(float)
Compute cube-root.
bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order)
char __ovld __cnfn clamp(char x, char minval, char maxval)
Returns min(max(x, minval), maxval).
uchar __ovld __cnfn convert_uchar_rtp(char)
float __ovld __cnfn log10(float)
Compute a base 10 logarithm.
uchar2 __ovld __cnfn convert_uchar2_rte(char2)
float __ovld __cnfn half_log10(float x)
Compute a base 10 logarithm.
ndrange_t __ovld ndrange_1D(size_t)
uint2 __ovld __cnfn convert_uint2_sat_rtz(char2)
float __ovld __cnfn native_exp10(float x)
Compute the base-10 exponential of x over an implementation-defined range.
int __ovld atomic_xor(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
float __ovld __cnfn distance(float p0, float p1)
Returns the distance between p0 and p1.
uchar8 __ovld __cnfn convert_uchar8_rte(char8)
char8 __ovld __cnfn convert_char8_sat(char8)
char16 __ovld __cnfn convert_char16(char16)
float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord)
Use the coordinate (coord.xy) to do an element lookup in the 2D image object specified by image...
uchar8 __ovld __cnfn convert_uchar8_rtz(char8)
char16 __ovld __cnfn convert_char16_sat_rtp(char16)
char4 __ovld __cnfn convert_char4(char4)
size_t __ovld __cnfn get_global_id(uint dimindx)
Returns the unique global work-item ID value for dimension identified by dimindx. ...
float __ovld __cnfn half_sqrt(float x)
Compute square root.
ushort16 __ovld __cnfn convert_ushort16_rtn(char16)
uchar4 __ovld __cnfn convert_uchar4_rtz(char4)
void __ovld __conv barrier(cl_mem_fence_flags flags)
All work-items in a work-group executing the kernel on a processor must execute this function before ...
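A compact sketch of the pattern this enables: a tree reduction in local memory, where every work-item reaches each barrier the same number of times (a power-of-two work-group size is assumed; kernel and buffer names are illustrative):
__kernel void local_sum(__global const int *in, __global int *out, __local int *tmp) {
  size_t lid = get_local_id(0);
  tmp[lid] = in[get_global_id(0)];
  barrier(CLK_LOCAL_MEM_FENCE);                    /* local buffer fully populated */
  for (size_t stride = get_local_size(0) / 2; stride > 0; stride >>= 1) {
    if (lid < stride)
      tmp[lid] += tmp[lid + stride];
    barrier(CLK_LOCAL_MEM_FENCE);                  /* every work-item hits this, active or not */
  }
  if (lid == 0)
    out[get_group_id(0)] = tmp[0];                 /* one partial sum per work-group */
}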
ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2)
void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p)
float __ovld __cnfn native_log2(float x)
Compute a base 2 logarithm over an implementation-defined range.
float __ovld __cnfn radians(float degrees)
Converts degrees to radians, i.e. (PI / 180) * degrees.
size_t __ovld __cnfn get_group_id(uint dimindx)
get_group_id returns the work-group ID, which is a number in the range 0 to get_num_groups(dimindx) - 1.
uint4 __ovld __cnfn convert_uint4_rtz(char4)
void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p)
void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void *value)
uint16 __ovld __cnfn convert_uint16(char16)
char16 __ovld __cnfn convert_char16_rtn(char16)
float __ovld __cnfn erf(float)
Error function encountered in integrating the normal distribution.
uint16 __ovld __cnfn convert_uint16_rte(char16)
float __ovld __cnfn asinh(float)
Inverse hyperbolic sine.
void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p)
ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16)
int2 __ovld __cnfn convert_int2_sat_rtz(char2)
char2 __ovld __cnfn convert_char2_sat(char2)
char __ovld __cnfn popcount(char x)
void __ovld vstore_half_rtz(float data, size_t offset, half *p)
uint2 __ovld __cnfn convert_uint2_rtz(char2)
int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute (old == cmp) ? val : old and store the result at the location pointed by p. The function returns old.
char __ovld __cnfn convert_char_sat_rtn(char)
void __ovld vstorea_half_rtp(float data, size_t offset, half *p)
long8 __ovld __cnfn convert_long8_sat(char8)
char __ovld __cnfn hadd(char x, char y)
Returns (x + y) >> 1.
void __ovld read_mem_fence(cl_mem_fence_flags flags)
Read memory barrier that orders only loads.
float2 __ovld __cnfn convert_float2_rtp(char2)
ulong4 __ovld __cnfn convert_ulong4_sat(char4)
ushort __ovld __cnfn convert_ushort_sat_rte(char)
void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p)
ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4)
float __ovld __cnfn normalize(float p)
Returns a vector in the same direction as p but with a length of 1.
long8 __ovld __cnfn convert_long8_rtz(char8)
float __ovld __cnfn copysign(float x, float y)
Returns x with its sign changed to match the sign of y.
void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color)
unsigned short ushort
An unsigned 16-bit integer.
float __ovld __cnfn mad(float a, float b, float c)
mad approximates a * b + c.
float __ovld __cnfn half_rsqrt(float x)
Compute inverse square root.
uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2)
ushort16 __ovld __cnfn convert_ushort16_rtz(char16)
char2 __ovld __cnfn convert_char2_rte(char2)
int8 __ovld __cnfn convert_int8_rtz(char8)
long2 __ovld __cnfn convert_long2_rtp(char2)
int3 __ovld __cnfn convert_int3_rte(char3)
long2 __ovld __cnfn convert_long2_rtz(char2)
uint __ovld __cnfn convert_uint_sat_rtz(char)
char8 __ovld vload8(size_t offset, const __constant char *p)
uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3)
int __ovld __cnfn mad24(int x, int y, int z)
Multiply two 24-bit integer values x and y and add the 32-bit integer result to the 32-bit integer z...
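A small sketch of the usual use: flattening a 2D index with the 24-bit multiplier, assuming the width and coordinates stay below 2^24 (kernel name is illustrative):
__kernel void copy_2d(__global const float *in, __global float *out, int width) {
  int x = (int)get_global_id(0);
  int y = (int)get_global_id(1);
  int idx = mad24(y, width, x);   /* y * width + x via the fast 24-bit path */
  out[idx] = in[idx];
}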
ushort __ovld __cnfn convert_ushort_rtp(char)
size_t __ovld get_global_linear_id(void)
void __ovld prefetch(const __global char *p, size_t num_elements)
Prefetch num_elements * sizeof(gentype) bytes into the global cache.
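A hedged sketch of using the hint in a grid-stride loop; prefetch never changes results, so the guard only avoids hinting past the end of the buffer (names are illustrative):
__kernel void scale(__global const float *src, __global float *dst, int n) {
  size_t stride = get_global_size(0);
  for (size_t i = get_global_id(0); i < (size_t)n; i += stride) {
    if (i + stride < (size_t)n)
      prefetch(&src[i + stride], 1);   /* hint: the next element this work-item will read */
    dst[i] = 2.0f * src[i];
  }
}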
char2 __ovld __cnfn convert_char2_sat_rte(char2)
ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2)
ndrange_t __ovld ndrange_2D(const size_t[2])
void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p)
ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2)
uchar8 __ovld __cnfn convert_uchar8_sat(char8)
clk_event_t __ovld create_user_event(void)
int __ovld __cnfn ilogb(float x)
Return the exponent as an integer value.
float __ovld __cnfn sin(float)
Compute sine.
short __ovld __cnfn convert_short_rtz(char)
uint4 __ovld __cnfn convert_uint4(char4)
bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired)
ulong3 __ovld __cnfn convert_ulong3(char3)
uint __ovld __cnfn convert_uint_sat_rtn(char)
int __ovld __conv work_group_reduce_max(int x)
short8 __ovld __cnfn convert_short8_rtp(char8)
short2 __ovld __cnfn convert_short2_sat(char2)
float __ovld __cnfn native_exp2(float x)
Compute the base-2 exponential of x over an implementation-defined range.
ulong __ovld __cnfn convert_ulong_sat_rtn(char)
int __ovld __cnfn isfinite(float)
Test for finite value.
short8 __ovld __cnfn convert_short8_sat_rtp(char8)
int8 __ovld __cnfn convert_int8_rtn(char8)
char8 __ovld __cnfn convert_char8_rte(char8)
uchar __ovld __cnfn convert_uchar(char)
ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16)
float __ovld __cnfn nan(uint nancode)
Returns a quiet NaN.
int __ovld __cnfn islessequal(float x, float y)
Returns the component-wise compare of x <= y.
ulong4 __ovld __cnfn convert_ulong4_rtz(char4)
void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p)
char __ovld __cnfn convert_char_sat_rtz(char)
uchar4 __ovld __cnfn convert_uchar4_rtp(char4)
int __ovld __cnfn isunordered(float x, float y)
Test if arguments are unordered.
char4 __ovld __cnfn convert_char4_sat_rtp(char4)
void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p)
int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand)
ulong2 __ovld __cnfn convert_ulong2_rtz(char2)
long3 __ovld __cnfn convert_long3_rtp(char3)
char __ovld __cnfn mad_hi(char a, char b, char c)
Returns mul_hi(a, b) + c.
uchar3 __ovld __cnfn convert_uchar3_sat(char3)
ulong __ovld __cnfn convert_ulong_sat_rte(char)
uint __ovld __cnfn convert_uint_rte(char)
long8 __ovld __cnfn convert_long8_sat_rte(char8)
long8 __ovld __cnfn convert_long8(char8)
uint16 __ovld __cnfn convert_uint16_rtz(char16)
long __ovld __cnfn convert_long_sat_rtp(char)
void __ovld vstore3(char3 data, size_t offset, char *p)
uint4 __ovld __cnfn convert_uint4_rte(char4)
float __ovld __cnfn log2(float)
Compute a base 2 logarithm.
uchar16 __ovld __cnfn convert_uchar16_rtz(char16)
uchar4 __ovld __cnfn convert_uchar4_sat(char4)
float16 __ovld __cnfn convert_float16_rtn(char16)
uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8)
float __ovld __cnfn powr(float x, float y)
Compute x to the power y, where x is >= 0.
uint __ovld __cnfn convert_uint_rtp(char)
short16 __ovld __cnfn convert_short16_rtn(char16)
float8 __ovld vloada_half8(size_t offset, const __constant half *p)
short16 __ovld __cnfn convert_short16_rtz(char16)
void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p)
ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4)
float __ovld __cnfn round(float x)
Return the integral value nearest to x rounding halfway cases away from zero, regardless of the curre...
int __ovld __cnfn isless(float x, float y)
Returns the component-wise compare of x < y.
uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16)
uint16 __ovld __cnfn convert_uint16_sat_rte(char16)
float __ovld __cnfn tan(float)
Compute tangent.
char4 __ovld __cnfn convert_char4_rte(char4)
void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p)
void __ovld vstore_half_rtp(float data, size_t offset, half *p)
short8 __ovld __cnfn convert_short8_sat_rtz(char8)
ushort3 __ovld __cnfn convert_ushort3(char3)
void __ovld write_mem_fence(cl_mem_fence_flags flags)
Write memory barrier that orders only stores.
uint3 __ovld __cnfn convert_uint3_sat(char3)
int __ovld atomic_xchg(volatile __global int *p, int val)
Swaps the old value stored at location p with new value given by val.
float4 __ovld __cnfn convert_float4_rtp(char4)
ulong16 __ovld __cnfn convert_ulong16_rtz(char16)
int4 __ovld __cnfn convert_int4_sat(char4)
int __ovld __conv work_group_scan_exclusive_max(int x)
float __ovld sincos(float x, float *cosval)
Compute sine and cosine of x.
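A minimal sketch of the out-parameter form: the sine comes back as the return value and the cosine is written through the pointer (kernel and buffer names are illustrative):
__kernel void rotate_points(__global float2 *pts, float angle) {
  size_t i = get_global_id(0);
  float c;
  float s = sincos(angle, &c);   /* s = sin(angle), c = cos(angle) */
  float2 p = pts[i];
  pts[i] = (float2)(c * p.x - s * p.y, s * p.x + c * p.y);
}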
float __ovld __cnfn rint(float)
Round to integral value (using round to nearest even rounding mode) in floating-point format...
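For a concrete comparison of the rounding built-ins above on the same halfway input (a throwaway sketch; the kernel name is made up):
__kernel void rounding_demo(__global float4 *out) {
  float x = -2.5f;
  /* trunc: toward zero, ceil: toward +inf, round: halfway away from zero, rint: to nearest even */
  out[0] = (float4)(trunc(x), ceil(x), round(x), rint(x));   /* (-2, -2, -3, -2) */
}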
long3 __ovld __cnfn convert_long3_rte(char3)
void __ovld vstore16(char16 data, size_t offset, char *p)
ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4)
long16 __ovld __cnfn convert_long16_sat_rtp(char16)
ulong __ovld __cnfn convert_ulong_rtp(char)
uchar __ovld __cnfn convert_uchar_sat_rte(char)
short __ovld __cnfn convert_short_sat(char)
int4 __ovld __cnfn convert_int4_sat_rtz(char4)
uint16 __ovld __cnfn convert_uint16_rtp(char16)
bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object)
int4 __ovld __cnfn convert_int4_rtp(char4)
float __ovld __cnfn degrees(float radians)
Converts radians to degrees, i.e. (180 / PI) * radians.
int __ovld __conv work_group_scan_inclusive_max(int x)
void __ovld vstore2(char2 data, size_t offset, char *p)
ushort3 __ovld __cnfn convert_ushort3_sat(char3)
long16 __ovld __cnfn convert_long16_rtp(char16)
void __ovld vstore_half_rte(float data, size_t offset, half *p)
void __ovld vstorea_half4(float4 data, size_t offset, half *p)
long3 __ovld __cnfn convert_long3(char3)
long8 __ovld __cnfn convert_long8_rtp(char8)
float4 __ovld __cnfn convert_float4(char4)
int4 __ovld __cnfn convert_int4_sat_rtp(char4)
uchar2 __ovld __cnfn convert_uchar2_rtp(char2)
ulong2 __ovld __cnfn convert_ulong2_rtn(char2)
char16 __ovld __cnfn convert_char16_rtz(char16)
uchar8 __ovld __cnfn convert_uchar8(char8)
uint4 __ovld __cnfn convert_uint4_sat_rte(char4)
int __ovld __cnfn convert_int(char)
void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p)
int2 __ovld __cnfn convert_int2_sat_rte(char2)
char __ovld __cnfn convert_char_rte(char)
void __ovld vstore4(char4 data, size_t offset, char *p)
float3 __ovld __cnfn convert_float3_rtz(char3)
ulong16 __ovld __cnfn convert_ulong16_rtn(char16)
int16 __ovld __cnfn convert_int16_sat_rtz(char16)
ushort8 __ovld __cnfn convert_ushort8_sat(char8)
int __ovld __conv work_group_broadcast(int a, size_t local_id)
char3 __ovld __cnfn convert_char3_sat(char3)
int __ovld __cnfn isequal(float x, float y)
Returns the component-wise compare of x == y; the vector forms intn isequal(floatn x, floatn y) return a per-component result.
void __ovld vstore8(char8 data, size_t offset, char *p)
float __ovld __cnfn log1p(float x)
Compute a base e logarithm of (1.0 + x).
char8 __ovld __cnfn convert_char8_rtz(char8)
char __ovld __cnfn clz(char x)
Returns the number of leading 0-bits in x, starting at the most significant bit position.
float __ovld __cnfn exp10(float)
Exponential base 10 function.
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type...
float __ovld __cnfn half_exp10(float x)
Compute the base-10 exponential of x.
int3 __ovld __cnfn convert_int3_rtz(char3)
ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3)
int8 __ovld __cnfn convert_int8_rtp(char8)
short3 __ovld __cnfn convert_short3_sat_rtp(char3)
event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event)
Perform an async copy of num_elements char elements from src to dst; the copy is performed cooperatively by the work-group, and the returned event can be passed to wait_group_events to wait for completion.
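A short sketch of the usual pattern: every work-item in the group calls the copy with the same arguments, then the group waits on the returned event before touching the local buffer (kernel and buffer names are illustrative):
__kernel void stage_tile(__global const char *src, __global char *dst,
                         __local char *tile) {
  size_t n = get_local_size(0);
  event_t e = async_work_group_copy(tile, src + get_group_id(0) * n, n, 0);
  wait_group_events(1, &e);                 /* whole group waits for the staged tile */
  dst[get_global_id(0)] = tile[get_local_id(0)];
}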
char16 __ovld __cnfn convert_char16_sat_rtn(char16)
int __ovld atomic_fetch_or(volatile atomic_int *object, int operand)
int __ovld atomic_fetch_min(volatile atomic_int *object, int operand)
uint2 __ovld __cnfn convert_uint2_sat_rtn(char2)
ulong8 __ovld __cnfn convert_ulong8_rtp(char8)
ushort4 __ovld __cnfn convert_ushort4_rte(char4)
int4 __ovld __cnfn convert_int4(char4)
uint __ovld __cnfn convert_uint(char)
void __ovld vstore_half_rtn(float data, size_t offset, half *p)
int2 __ovld __cnfn convert_int2_rtp(char2)
long2 __ovld __cnfn convert_long2_sat_rtn(char2)
float __ovld __cnfn convert_float_rtz(char)
int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord)
void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p)
char __ovld __cnfn mul_hi(char x, char y)
Computes x * y and returns the high half of the product of x and y.
uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3)
int __ovld __conv work_group_all(int predicate)
Returns a non-zero value if predicate evaluates to non-zero for all work-items in the work-group.
int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order)
short8 __ovld __cnfn convert_short8_rtn(char8)
void __ovld vstore_half16(float16 data, size_t offset, half *p)
void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p)
long2 __ovld __cnfn convert_long2_sat_rte(char2)
int3 __ovld __cnfn convert_int3_sat(char3)
float __ovld __cnfn log(float)
Compute natural logarithm.
uint3 __ovld __cnfn convert_uint3_sat_rtn(char3)
int __ovld __cnfn isnotequal(float x, float y)
Returns the component-wise compare of x != y.
ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4)
long8 __ovld __cnfn convert_long8_sat_rtn(char8)
uint8 __ovld __cnfn convert_uint8_rtp(char8)
uchar8 __ovld __cnfn convert_uchar8_rtn(char8)
ushort16 __ovld __cnfn convert_ushort16_sat(char16)
float __ovld modf(float x, float *iptr)
Decompose a floating-point number.
size_t __ovld get_enqueued_local_size(uint dimindx)
ushort2 __ovld __cnfn convert_ushort2_rte(char2)
float4 __ovld __cnfn cross(float4 p0, float4 p1)
Returns the cross product of p0.xyz and p1.xyz.
float3 __ovld __cnfn convert_float3_rte(char3)
int3 __ovld __cnfn convert_int3(char3)
void __ovld mem_fence(cl_mem_fence_flags flags)
Orders loads and stores of a work-item executing a kernel.
ushort3 __ovld __cnfn convert_ushort3_rtn(char3)
ushort4 __ovld __cnfn convert_ushort4_rtn(char4)
long3 __ovld __cnfn convert_long3_rtn(char3)
float16 __ovld __cnfn convert_float16_rtp(char16)
char2 __ovld __cnfn convert_char2(char2)
char2 __ovld __cnfn shuffle(char2 x, uchar2 mask)
The shuffle and shuffle2 built-in functions construct a permutation of elements from one or two input...
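A minimal usage sketch (kernel and argument names are illustrative assumptions): the mask components name source element indices, so a (1, 0) mask reverses a char2.
__kernel void reverse_pairs(__global char2 *v)
{
    size_t gid = get_global_id(0);
    uchar2 mask = (uchar2)(1, 0);      // select element 1 first, then element 0
    v[gid] = shuffle(v[gid], mask);
}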
float __ovld __cnfn exp(float x)
Compute the base e exponential function of x.
int8 __ovld __cnfn convert_int8(char8)
float4 __ovld vloada_half4(size_t offset, const __constant half *p)
char __ovld __cnfn mad_sat(char a, char b, char c)
Returns a * b + c and saturates the result.
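A hedged sketch of mad_sat (names are illustrative): unlike a plain a * b + c, the result clamps to the char range instead of wrapping.
__kernel void saturating_mad(__global const char *a, __global const char *b,
                             __global const char *c, __global char *out)
{
    size_t gid = get_global_id(0);
    // e.g. mad_sat((char)100, (char)2, (char)50) yields 127 rather than a wrapped value.
    out[gid] = mad_sat(a[gid], b[gid], c[gid]);
}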
int2 __ovld __cnfn convert_int2_rtn(char2)
float2 __ovld vloada_half2(size_t offset, const __constant half *p)
short __ovld __cnfn convert_short_rtp(char)
int __ovld atomic_inc(volatile __global int *p)
Read the 32-bit value (referred to as old) stored at location pointed by p.
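For illustration, a coarse 8-bin histogram built on atomic_inc; the kernel name, buffer names, and binning scheme are assumptions for this sketch.
__kernel void histogram8(__global const uchar *data, volatile __global int *bins)
{
    size_t gid = get_global_id(0);
    // The top three bits of the byte pick one of 8 bins; the returned old value is ignored.
    atomic_inc(&bins[data[gid] >> 5]);
}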
int __ovld atomic_and(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
float __ovld fract(float x, float *iptr)
Returns fmin(x - floor(x), 0x1.fffffep-1f).
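A minimal sketch splitting a value into fractional and floor parts (names are illustrative):
__kernel void split_fract(__global const float *in,
                          __global float *frac_part, __global float *floor_part)
{
    size_t gid = get_global_id(0);
    float whole;                          // receives floor(in[gid]) via iptr
    frac_part[gid] = fract(in[gid], &whole);
    floor_part[gid] = whole;
}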
uchar __ovld __cnfn convert_uchar_rtn(char)
short4 __ovld __cnfn convert_short4_rtn(char4)
uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2)
char4 __ovld vload4(size_t offset, const __constant char *p)
float __ovld __cnfn smoothstep(float edge0, float edge1, float x)
Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and performs smooth Hermite interpolation between 0 a...
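A small usage sketch (kernel name and arguments assumed): 0.0 below edge0, 1.0 above edge1, and a smooth Hermite ramp in between.
__kernel void fade(__global const float *x, __global float *out,
                   float edge0, float edge1)
{
    size_t gid = get_global_id(0);
    out[gid] = smoothstep(edge0, edge1, x[gid]);
}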
uint2 __ovld __cnfn convert_uint2_sat(char2)
ushort __ovld __cnfn convert_ushort_sat_rtp(char)
uint __ovld __cnfn convert_uint_rtn(char)
float __ovld __cnfn convert_float_rtn(char)
char4 __ovld __cnfn convert_char4_sat_rtn(char4)
void __ovld vstore_half3(float3 data, size_t offset, half *p)
char2 __ovld __cnfn convert_char2_rtn(char2)
size_t __ovld __cnfn get_local_id(uint dimindx)
Returns the unique local work-item ID, i.e. a work-item within a specific work-group for the dimension identified by dimindx.
char __ovld __cnfn convert_char_rtp(char)
uint2 __ovld __cnfn convert_uint2_sat_rtp(char2)
float __ovld __cnfn acos(float)
Arc cosine function.
uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8)
void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color)
Write color value to location specified by coordinate (coord.x, coord.y) in the 2D image object speci...
uint4 __ovld __cnfn convert_uint4_sat_rtz(char4)
char16 __ovld __cnfn convert_char16_sat(char16)
int __ovld __cnfn islessgreater(float x, float y)
Returns the component-wise compare of (x < y) || (x > y) .
long16 __ovld __cnfn convert_long16_sat_rte(char16)
uchar4 __ovld __cnfn convert_uchar4_rtn(char4)
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope)
short4 __ovld __cnfn convert_short4(char4)
float8 __ovld vload_half8(size_t offset, const __constant half *p)
ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16)
ushort __ovld __cnfn convert_ushort_rte(char)
float8 __ovld __cnfn convert_float8(char8)
char2 __ovld __cnfn convert_char2_rtp(char2)
short3 __ovld __cnfn convert_short3_rte(char3)
long16 __ovld __cnfn convert_long16_rte(char16)
ushort8 __ovld __cnfn convert_ushort8_rtp(char8)
uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4)
float __ovld __cnfn rsqrt(float)
Compute inverse square root.
short16 __ovld __cnfn convert_short16_rtp(char16)
ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16)
void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope)
float2 __ovld __cnfn convert_float2(char2)
uint3 __ovld __cnfn convert_uint3_rtz(char3)
float __ovld __cnfn fabs(float)
Compute absolute value of a floating-point number.
char16 __ovld __cnfn convert_char16_rtp(char16)
uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2)
float2 __ovld vload_half2(size_t offset, const __constant half *p)
Read sizeof (halfn) bytes of data from address (p + (offset * n)).
long __ovld __cnfn convert_long_rtz(char)
void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p)
int3 __ovld __cnfn convert_int3_rtp(char3)
int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order)
ulong4 __ovld __cnfn convert_ulong4_rtp(char4)
short3 __ovld __cnfn convert_short3_rtn(char3)
size_t __ovld __cnfn get_global_size(uint dimindx)
Returns the number of global work-items specified for dimension identified by dimindx.
ulong16 __ovld __cnfn convert_ulong16_sat(char16)
void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p)
uchar __ovld __cnfn abs_diff(char x, char y)
Returns | x - y | without modulo overflow.
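An illustrative sketch of why abs_diff exists (names assumed): it avoids the overflow that abs(x - y) can hit for extreme char values.
__kernel void byte_distance(__global const char *a, __global const char *b,
                            __global uchar *out)
{
    size_t gid = get_global_id(0);
    // For a = 127 and b = -128 the true distance is 255, which still fits in a uchar.
    out[gid] = abs_diff(a[gid], b[gid]);
}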
long __ovld __cnfn convert_long_sat_rtz(char)
ulong16 __ovld __cnfn convert_ulong16(char16)
short3 __ovld __cnfn convert_short3_sat(char3)
void __ovld vstore_half16_rte(float16 data, size_t offset, half *p)
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
uchar3 __ovld __cnfn convert_uchar3_rtz(char3)
uchar __ovld __cnfn convert_uchar_rte(char)
long4 __ovld __cnfn convert_long4_sat_rtn(char4)
int __ovld __conv work_group_reduce_min(int x)
unsigned int workDimension
cl_mem_fence_flags __ovld get_fence(const void *ptr)
char3 __ovld __cnfn convert_char3_sat_rtp(char3)
ushort4 __ovld __cnfn convert_ushort4(char4)
long2 __ovld __cnfn convert_long2_rte(char2)
int2 __ovld __cnfn convert_int2(char2)
long __ovld __cnfn convert_long(char)
float __ovld __cnfn native_recip(float x)
Compute reciprocal over an implementation-defined range.
void __ovld vstore_half3_rte(float3 data, size_t offset, half *p)
float __ovld __cnfn asin(float)
Arc sine function.
ulong16 __ovld __cnfn convert_ulong16_rte(char16)
long4 __ovld __cnfn convert_long4_sat(char4)
ushort16 __ovld __cnfn convert_ushort16(char16)
float16 __ovld vload_half16(size_t offset, const __constant half *p)
short3 __ovld __cnfn convert_short3(char3)
void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p)
int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order)
uchar __ovld __cnfn abs(char x)
Returns | x |.
int8 __ovld __cnfn convert_int8_sat(char8)
int2 __ovld __cnfn convert_int2_rtz(char2)
uint8 __ovld __cnfn convert_uint8_sat_rtz(char8)
float __ovld __cnfn native_sqrt(float x)
Compute square root over an implementation-defined range.
int __ovld atomic_add(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
float8 __ovld __cnfn convert_float8_rtz(char8)
ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2)
int __ovld __cnfn convert_int_rtz(char)
ulong3 __ovld __cnfn convert_ulong3_rte(char3)
ulong3 __ovld __cnfn convert_ulong3_rtz(char3)
char3 __ovld __cnfn convert_char3_rtz(char3)
uint __ovld __cnfn get_work_dim(void)
Returns the number of dimensions in use.
long2 __ovld __cnfn convert_long2_sat(char2)
float __ovld __cnfn length(float p)
Return the length of vector p, i.e., sqrt(p.x^2 + p.y^2 + ...)
float __ovld __cnfn acosh(float)
Inverse hyperbolic cosine.
ulong __ovld __cnfn convert_ulong_rtn(char)
short __ovld __cnfn convert_short_sat_rtz(char)
typedef __UINTPTR_TYPE__ uintptr_t
An unsigned integer type with the property that any valid pointer to void can be converted to this ty...
uint2 __ovld __cnfn convert_uint2_rtn(char2)
float __ovld __cnfn hypot(float x, float y)
Compute the value of the square root of x^2 + y^2 without undue overflow or underflow.
uint8 __ovld __cnfn convert_uint8_rte(char8)
float __ovld __cnfn atan2(float y, float x)
Arc tangent of y / x.
float __ovld remquo(float x, float y, int *quo)
The remquo function computes the value r such that r = x - n*y, where n is the integer nearest the ex...
int __ovld atomic_exchange(volatile atomic_int *object, int desired)
char8 __ovld __cnfn convert_char8_rtn(char8)
uint8 __ovld __cnfn convert_uint8_sat_rte(char8)
void __ovld vstorea_half3(float3 data, size_t offset, half *p)
int16 __ovld __cnfn convert_int16_rte(char16)
char16 __ovld __cnfn convert_char16_rte(char16)
long __ovld __cnfn convert_long_rtn(char)
ushort4 __ovld __cnfn convert_ushort4_sat(char4)
int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order)
ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2)
char4 __ovld __cnfn convert_char4_sat(char4)
char16 __ovld __cnfn convert_char16_sat_rte(char16)
float __ovld __cnfn acospi(float x)
Compute acos (x) / PI.
uint16 __ovld __cnfn convert_uint16_rtn(char16)
float2 __ovld __cnfn convert_float2_rtn(char2)
float __ovld __cnfn half_exp2(float x)
Compute the base- 2 exponential of x.
float4 __ovld __cnfn convert_float4_rte(char4)
long4 __ovld __cnfn convert_long4_rte(char4)
uint16 __ovld __cnfn convert_uint16_sat_rtp(char16)
uint3 __ovld __cnfn convert_uint3_sat_rte(char3)
uint4 __ovld __cnfn convert_uint4_sat(char4)
int3 __ovld __cnfn convert_int3_sat_rte(char3)
ulong3 __ovld __cnfn convert_ulong3_rtp(char3)
float __ovld __cnfn native_log(float x)
Compute natural logarithm over an implementation-defined range.
float __ovld __cnfn rootn(float x, int y)
Compute x to the power 1/y.
void __ovld vstorea_half_rte(float data, size_t offset, half *p)
short __ovld __cnfn upsample(char hi, uchar lo)
result[i] = ((short)hi[i] << 8) | lo[i] for the signed variants; result[i] = ((ushort)hi[i] << 8) | lo[i] for the unsigned variants.
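A short sketch (names assumed) packing a signed high byte and an unsigned low byte into one short:
__kernel void widen_bytes(__global const char *hi, __global const uchar *lo,
                          __global short *out)
{
    size_t gid = get_global_id(0);
    out[gid] = upsample(hi[gid], lo[gid]);   // ((short)hi << 8) | lo
}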
char __ovld __cnfn bitselect(char a, char b, char c)
Each bit of the result is the corresponding bit of a if the corresponding bit of c is 0...
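A minimal sketch of bit-level merging with bitselect (names assumed): with mask 0x0F the low nibble comes from b and the high nibble from a.
__kernel void merge_nibbles(__global const char *a, __global const char *b,
                            __global char *out)
{
    size_t gid = get_global_id(0);
    out[gid] = bitselect(a[gid], b[gid], (char)0x0F);
}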
short __ovld __cnfn convert_short_rtn(char)
void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p)
int __ovld __cnfn convert_int_sat_rtn(char)
void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p)
char3 __ovld __cnfn convert_char3_sat_rte(char3)
uchar16 __ovld __cnfn convert_uchar16(char16)
uint4 __ovld __cnfn convert_uint4_sat_rtp(char4)
ushort8 __ovld __cnfn convert_ushort8(char8)
short8 __ovld __cnfn convert_short8(char8)
float __ovld __cnfn mix(float x, float y, float a)
Returns the linear blend of x and y implemented as: x + (y - x) * a. a must be a value in the range 0...
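A hedged usage sketch (names assumed); the blend factor a is expected to lie in [0.0, 1.0].
__kernel void blend(__global const float *x, __global const float *y,
                    __global float *out, float a)
{
    size_t gid = get_global_id(0);
    out[gid] = mix(x[gid], y[gid], a);    // equivalent to x + (y - x) * a
}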
char3 __ovld __cnfn convert_char3_sat_rtz(char3)
float __ovld __cnfn fmin(float x, float y)
Returns y if y < x, otherwise it returns x.
float4 __ovld __cnfn convert_float4_rtz(char4)
ushort16 __ovld __cnfn convert_ushort16_rte(char16)
void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p)
uchar2 __ovld __cnfn convert_uchar2(char2)
ulong __ovld __cnfn convert_ulong_sat_rtz(char)
char __ovld ctz(char x)
Returns the count of trailing 0-bits in x.
int8 __ovld __cnfn convert_int8_sat_rtp(char8)
ulong2 __ovld __cnfn convert_ulong2_rtp(char2)
char3 __ovld __cnfn convert_char3_rte(char3)
uint4 __ovld __cnfn convert_uint4_sat_rtn(char4)
char4 __ovld __cnfn convert_char4_rtz(char4)
bool __ovld is_valid_event(clk_event_t event)
ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8)
uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4)
void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p)
float __ovld __cnfn sinh(float)
Compute hyperbolic sine.
char2 __ovld vload2(size_t offset, const __constant char *p)
Use generic type gentype to indicate the built-in data types char, uchar, short, ushort, int, uint, long, ulong, float, double or half.
int __ovld __conv work_group_reduce_add(int x)
long16 __ovld __cnfn convert_long16_rtn(char16)
ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3)
ulong8 __ovld __cnfn convert_ulong8_rtn(char8)
long4 __ovld __cnfn convert_long4_sat_rtp(char4)
int __ovld __conv work_group_any(int predicate)
int __ovld __cnfn any(char x)
Returns 1 if the most significant bit in any component of x is set; otherwise returns 0...
void __ovld set_user_event_status(clk_event_t e, int state)
char __ovld __cnfn convert_char(char)
typedef unsigned int uint
An unsigned 32-bit integer.
int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order)
short16 __ovld __cnfn convert_short16(char16)
uchar16 __ovld __cnfn convert_uchar16_rte(char16)
float __ovld __cnfn half_cos(float x)
Compute cosine.
size_t __ovld __cnfn get_num_groups(uint dimindx)
Returns the number of work-groups that will execute a kernel for dimension identified by dimindx...
ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3)
float __ovld __cnfn native_sin(float x)
Compute sine over an implementation-defined range.
void __ovld vstorea_half16(float16 data, size_t offset, half *p)
float __ovld __cnfn native_log10(float x)
Compute a base 10 logarithm over an implementation-defined range.
short8 __ovld __cnfn convert_short8_rte(char8)
ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4)
size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array)
Return the image array size.
float __ovld frexp(float x, int *exp)
Extract mantissa and exponent from x.
long2 __ovld __cnfn convert_long2_sat_rtp(char2)
char8 __ovld __cnfn convert_char8_sat_rte(char8)
float __ovld __cnfn maxmag(float x, float y)
Returns x if | x | > | y |, y if | y | > | x |, otherwise fmax(x, y).
int __ovld __cnfn get_image_height(read_only image2d_t image)
Return the image height in pixels.
char3 __ovld __cnfn convert_char3_rtp(char3)
uint4 __ovld __cnfn convert_uint4_rtp(char4)
uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8)
char3 __ovld vload3(size_t offset, const __constant char *p)
int __ovld __conv work_group_scan_inclusive_add(int x)
uint2 __ovld __cnfn convert_uint2_rte(char2)
int __ovld atomic_fetch_max(volatile atomic_int *object, int operand)
char __ovld __cnfn select(char a, char b, char c)
For each component of a vector type, result[i] = if MSB of c[i] is set ? b[i] : a[i].
ulong __ovld __cnfn convert_ulong_sat(char)
void __ovld atomic_init(volatile atomic_int *object, int value)
float __ovld __cnfn sqrt(float)
Compute square root.
int __ovld __cnfn get_image_depth(read_only image3d_t image)
Return the image depth in pixels.
uint __ovld __cnfn convert_uint_rtz(char)
long3 __ovld __cnfn convert_long3_sat_rtp(char3)
void __ovld vstore_half4(float4 data, size_t offset, half *p)
float __ovld __cnfn fast_normalize(float p)
Returns a vector in the same direction as p but with a length of 1.
uint3 __ovld __cnfn convert_uint3_sat_rtz(char3)
long __ovld __cnfn convert_long_sat_rtn(char)
char __ovld __cnfn rotate(char v, char i)
For each element in v, the bits are shifted left by the number of bits given by the corresponding ele...
char16 __ovld __cnfn convert_char16_sat_rtz(char16)
short2 __ovld __cnfn convert_short2(char2)
uint __ovld __cnfn convert_uint_sat(char)
float __ovld __cnfn native_cos(float x)
Compute cosine over an implementation-defined range.
ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8)
ulong16 __ovld __cnfn convert_ulong16_rtp(char16)
char8 __ovld __cnfn convert_char8(char8)
typedef __PTRDIFF_TYPE__ ptrdiff_t
A signed integer type that is the result of subtracting two pointers.
uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3)
float __ovld __cnfn ldexp(float x, int n)
Multiply x by 2 to the power n.
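A small sketch (names assumed): scaling by an exact power of two without a pow call.
__kernel void scale_pow2(__global const float *x, __global float *out, int n)
{
    size_t gid = get_global_id(0);
    out[gid] = ldexp(x[gid], n);          // x * 2^n, exact while the result stays in range
}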
ushort2 __ovld __cnfn convert_ushort2(char2)
size_t __ovld __cnfn get_global_offset(uint dimindx)
get_global_offset returns the offset values specified in global_work_offset argument to clEnqueueNDRa...
short16 __ovld __cnfn convert_short16_sat_rtz(char16)
uint2 __ovld __cnfn convert_uint2(char2)
ulong2 __ovld __cnfn convert_ulong2_rte(char2)
int16 __ovld __cnfn convert_int16_rtz(char16)
int8 __ovld __cnfn convert_int8_rte(char8)
uchar3 __ovld __cnfn convert_uchar3(char3)
ulong4 __ovld __cnfn convert_ulong4(char4)
int2 __ovld __cnfn convert_int2_rte(char2)
ulong8 __ovld __cnfn convert_ulong8_sat(char8)
float __ovld __cnfn convert_float_rte(char)
void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p)
float3 __ovld __cnfn convert_float3(char3)
float __ovld __cnfn half_sin(float x)
Compute sine.
uchar16 __ovld __cnfn convert_uchar16_rtp(char16)
uint2 __ovld __cnfn convert_uint2_rtp(char2)
char8 __ovld __cnfn convert_char8_sat_rtn(char8)
ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3)
short2 __ovld __cnfn convert_short2_rtp(char2)
ulong3 __ovld __cnfn convert_ulong3_sat(char3)
float __ovld vload_half(size_t offset, const __constant half *p)
Read sizeof (half) bytes of data from address (p + offset).
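An illustrative round trip through half storage (names assumed; this relies on the __constant vload_half overload above and the generic-pointer vstore_half overload listed elsewhere in this section):
__kernel void scale_half(__constant half *in, __global half *out, float s)
{
    size_t gid = get_global_id(0);
    float v = vload_half(gid, in);        // widen one half to float
    vstore_half(v * s, gid, out);         // convert back to half with the default round-to-nearest-even mode
}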
int2 __ovld __cnfn convert_int2_sat(char2)
int __ovld __conv work_group_scan_exclusive_add(int x)
ushort2 __ovld __cnfn convert_ushort2_rtn(char2)
void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p)
int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image)
uchar3 __ovld __cnfn convert_uchar3_rte(char3)
short __ovld __cnfn convert_short_sat_rtn(char)
ushort8 __ovld __cnfn convert_ushort8_rtn(char8)
float __ovld __cnfn half_powr(float x, float y)
Compute x to the power y, where x is >= 0.
char3 __ovld __cnfn convert_char3_sat_rtn(char3)
short3 __ovld __cnfn convert_short3_rtp(char3)
uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord)
float __ovld __cnfn native_divide(float x, float y)
Compute x / y over an implementation-defined range.
char __ovld __cnfn sub_sat(char x, char y)
Returns x - y and saturates the result.
uint8 __ovld __cnfn convert_uint8_sat_rtp(char8)
int4 __ovld __cnfn convert_int4_rtz(char4)
uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16)
float4 __ovld __cnfn convert_float4_rtn(char4)
float __ovld __cnfn dot(float p0, float p1)
Compute dot product.
char __ovld __cnfn convert_char_sat_rtp(char)
uint8 __ovld __cnfn convert_uint8_rtz(char8)
long __ovld __cnfn convert_long_sat(char)
ushort2 __ovld __cnfn convert_ushort2_rtz(char2)
int __ovld __cnfn convert_int_rtn(char)
typedef int kernel_enqueue_flags_t
long8 __ovld __cnfn convert_long8_sat_rtz(char8)
float2 __ovld __cnfn convert_float2_rte(char2)
long3 __ovld __cnfn convert_long3_sat_rte(char3)
long4 __ovld __cnfn convert_long4(char4)
int4 __ovld __cnfn convert_int4_sat_rte(char4)
ushort __ovld __cnfn convert_ushort_rtz(char)
short8 __ovld __cnfn convert_short8_sat_rte(char8)
float __ovld __cnfn fdim(float x, float y)
x - y if x > y, +0 if x is less than or equal to y.
int __ovld __cnfn convert_int_sat_rtz(char)
ulong3 __ovld __cnfn convert_ulong3_rtn(char3)
ndrange_t __ovld ndrange_3D(const size_t[3])
int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order)
long8 __ovld __cnfn convert_long8_rte(char8)
size_t __ovld __cnfn get_local_size(uint dimindx)
Returns the number of local work-items specified in dimension identified by dimindx.
ushort8 __ovld __cnfn convert_ushort8_rtz(char8)
ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3)
ushort16 __ovld __cnfn convert_ushort16_rtp(char16)
char8 __ovld __cnfn convert_char8_sat_rtz(char8)
bool __ovld is_valid_reserve_id(reserve_id_t reserve_id)
int printf(__constant const char *st,...)
long4 __ovld __cnfn convert_long4_rtp(char4)
ulong2 __ovld __cnfn convert_ulong2_sat(char2)
float __ovld __cnfn half_log2(float x)
Compute a base 2 logarithm.
typedef char char2 __attribute__((ext_vector_type(2)))
float __ovld __cnfn sign(float x)
Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x = +0.0, or -1.0 if x < 0.
int2 __ovld __cnfn convert_int2_sat_rtn(char2)
long16 __ovld __cnfn convert_long16_rtz(char16)
float __ovld __cnfn convert_float_rtp(char)
void __ovld vstore_half2(float2 data, size_t offset, half *p)
The floatn value given by data is converted to a halfn value using the appropriate rounding mode...
float __ovld __cnfn fma(float a, float b, float c)
Returns the correctly rounded floating-point representation of the sum of c with the infinitely preci...
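A minimal sketch (names assumed) of an fma-based axpy step; the single rounding distinguishes it from writing a * x + y as separate multiply and add operations.
__kernel void axpy(float a, __global const float *x, __global const float *y,
                   __global float *out)
{
    size_t gid = get_global_id(0);
    out[gid] = fma(a, x[gid], y[gid]);
}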
ulong4 __ovld __cnfn convert_ulong4_rte(char4)
float __ovld __cnfn fast_distance(float p0, float p1)
Returns fast_length(p0 - p1).
int __ovld __cnfn convert_int_sat(char)
uchar8 __ovld __cnfn convert_uchar8_rtp(char8)
short2 __ovld __cnfn convert_short2_sat_rtn(char2)
short8 __ovld __cnfn convert_short8_sat(char8)
int8 __ovld __cnfn convert_int8_sat_rte(char8)
ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8)
int16 __ovld __cnfn convert_int16_sat_rte(char16)
short3 __ovld __cnfn convert_short3_rtz(char3)
float __ovld __cnfn half_tan(float x)
Compute tangent.
uint4 __ovld __cnfn convert_uint4_rtn(char4)
event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event)
Perform an async gather of num_elements gentype elements from src to dst.
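A hedged sketch of a strided gather into local memory (the kernel, its arguments, and the trivial use of the tile are assumptions); every work-item in the group must reach these calls with the same arguments.
__kernel void gather_strided(__global const char *src, __global char *dst,
                             __local char *tile, uint n, uint stride)
{
    event_t ev = async_work_group_strided_copy(tile, src, n, stride, 0);
    wait_group_events(1, &ev);            // block until the gather has finished
    if (get_local_id(0) == 0)
        dst[get_group_id(0)] = tile[0];   // trivial consumption of the gathered data
}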
short __ovld __cnfn convert_short_rte(char)
short2 __ovld __cnfn convert_short2_sat_rtp(char2)
long16 __ovld __cnfn convert_long16_sat_rtn(char16)
ulong8 __ovld __cnfn convert_ulong8(char8)
void __ovld vstore_half4_rte(float4 data, size_t offset, half *p)
char2 __ovld __cnfn convert_char2_sat_rtz(char2)
short3 __ovld __cnfn convert_short3_sat_rtz(char3)
ulong4 __ovld __cnfn convert_ulong4_rtn(char4)
char __ovld __cnfn convert_char_rtz(char)
short4 __ovld __cnfn convert_short4_sat(char4)
short4 __ovld __cnfn convert_short4_sat_rte(char4)
ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2)
float __ovld __cnfn step(float edge, float x)
Returns 0.0 if x < edge, otherwise it returns 1.0.
ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8)
uchar __ovld __cnfn convert_uchar_sat(char)
float __ovld __cnfn exp2(float)
Exponential base 2 function.
float __ovld __cnfn cosh(float)
Compute hyperbolic cosine.
char __ovld __cnfn convert_char_sat(char)
ushort4 __ovld __cnfn convert_ushort4_rtp(char4)
float16 __ovld vloada_half16(size_t offset, const __constant half *p)
ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3)
float16 __ovld __cnfn convert_float16(char16)
void __ovld atomic_store(volatile atomic_int *object, int desired)
int3 __ovld __cnfn convert_int3_rtn(char3)
short8 __ovld __cnfn convert_short8_sat_rtn(char8)
float __ovld __cnfn floor(float)
Round to integral value using the round to negative infinity rounding mode.
uint2 __ovld __cnfn convert_uint2_sat_rte(char2)
void __ovld vstorea_half8(float8 data, size_t offset, half *p)
long8 __ovld __cnfn convert_long8_sat_rtp(char8)
long2 __ovld __cnfn convert_long2_rtn(char2)
int16 __ovld __cnfn convert_int16(char16)
ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2)
ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4)
ushort3 __ovld __cnfn convert_ushort3_rtp(char3)
void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p)
long3 __ovld __cnfn convert_long3_sat(char3)
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p)
short8 __ovld __cnfn convert_short8_rtz(char8)
float __ovld __cnfn lgamma(float x)
Log gamma function.
void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p)
uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4)
uchar2 __ovld __cnfn convert_uchar2_rtn(char2)
ushort __ovld __cnfn convert_ushort_sat_rtz(char)
int __ovld __cnfn convert_int_sat_rte(char)
short2 __ovld __cnfn convert_short2_rtn(char2)
float2 __ovld __cnfn convert_float2_rtz(char2)
short2 __ovld __cnfn convert_short2_sat_rte(char2)
uchar __ovld __cnfn convert_uchar_sat_rtn(char)
float __ovld __cnfn atan2pi(float y, float x)
Compute atan2 (y, x) / PI.
uint16 __ovld __cnfn convert_uint16_sat_rtn(char16)
void __ovld vstorea_half(float data, size_t offset, half *p)
The floatn value given by data is converted to a halfn value using the appropriate rounding mode...
int4 __ovld __cnfn convert_int4_rtn(char4)
int __ovld __cnfn convert_int_sat_rtp(char)
uint16 __ovld __cnfn convert_uint16_sat_rtz(char16)
short __ovld __cnfn convert_short(char)
float __ovld lgamma_r(float x, int *signp)
ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2)
ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8)
typedef unsigned char uchar
An unsigned 8-bit integer.
int8 __ovld __cnfn convert_int8_sat_rtz(char8)
uchar16 __ovld __cnfn convert_uchar16_sat(char16)
ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3)
void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p)
float __ovld __cnfn atanpi(float x)
Compute atan (x) / PI.
void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p)
ulong __ovld __cnfn convert_ulong_rte(char)
void __ovld retain_event(clk_event_t)
short16 __ovld __cnfn convert_short16_sat_rte(char16)
char4 __ovld __cnfn convert_char4_rtp(char4)
int16 __ovld __cnfn convert_int16_rtn(char16)
short16 __ovld __cnfn convert_short16_sat(char16)
void __ovld vstore_half2_rte(float2 data, size_t offset, half *p)
int3 __ovld __cnfn convert_int3_sat_rtp(char3)
void __ovld vstorea_half_rtn(float data, size_t offset, half *p)
int __ovld atomic_dec(volatile __global int *p)
Read the 32-bit value (referred to as old) stored at location pointed by p.
char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask)
short4 __ovld __cnfn convert_short4_sat_rtz(char4)
float __ovld vloada_half(size_t offset, const __constant half *p)
For n = 1, 2, 4, 8 and 16 read sizeof (halfn) bytes of data from address (p + (offset * n))...
int __ovld __cnfn isgreaterequal(float x, float y)
Returns the component-wise compare of x >= y.
int __ovld enqueue_marker(queue_t, uint, const __private clk_event_t *, __private clk_event_t *)
void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p)
uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8)
long3 __ovld __cnfn convert_long3_rtz(char3)
short4 __ovld __cnfn convert_short4_rtz(char4)
float4 __ovld vload_half4(size_t offset, const __constant half *p)
uint8 __ovld __cnfn convert_uint8(char8)
uchar4 __ovld __cnfn convert_uchar4(char4)
ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8)
float3 __ovld __cnfn convert_float3_rtp(char3)
ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16)
short __ovld __cnfn convert_short_sat_rte(char)
uint3 __ovld __cnfn convert_uint3_rtp(char3)
uchar2 __ovld __cnfn convert_uchar2_rtz(char2)
long16 __ovld __cnfn convert_long16(char16)
char __ovld __cnfn add_sat(char x, char y)
Returns x + y and saturates the result.
float __ovld __cnfn fast_length(float p)
Returns the length of vector p computed as: half_sqrt(p.x^2 + p.y^2 + ...)
long4 __ovld __cnfn convert_long4_rtn(char4)
int3 __ovld __cnfn convert_int3_sat_rtn(char3)
char __ovld __cnfn rhadd(char x, char y)
Returns (x + y + 1) >> 1.
void __ovld vstorea_half2(float2 data, size_t offset, half *p)
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected, int desired, memory_order success, memory_order failure)
short2 __ovld __cnfn convert_short2_rtz(char2)
short4 __ovld __cnfn convert_short4_sat_rtn(char4)
char3 __ovld __cnfn convert_char3_rtn(char3)
int __ovld __cnfn isordered(float x, float y)
Test if arguments are ordered.
float __ovld __cnfn cos(float)
Compute cosine.
short2 __ovld __cnfn convert_short2_rte(char2)
long __ovld __cnfn convert_long_rte(char)
short4 __ovld __cnfn convert_short4_sat_rtp(char4)
int __ovld __cnfn isnan(float)
Test for a NaN.
int __ovld __cnfn get_image_channel_order(read_only image1d_t image)
void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p)
long2 __ovld __cnfn convert_long2(char2)
void __ovld atomic_flag_clear(volatile atomic_flag *object)
int __ovld __cnfn isinf(float)
Test for infinity value (positive or negative).
void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p)
uint __ovld __cnfn convert_uint_sat_rtp(char)
ushort __ovld __cnfn convert_ushort(char)
char8 __ovld __cnfn convert_char8_sat_rtp(char8)
int4 __ovld __cnfn convert_int4_rte(char4)
float8 __ovld __cnfn convert_float8_rtp(char8)
float __ovld __cnfn half_log(float x)
Compute natural logarithm.
ushort __ovld __cnfn convert_ushort_sat(char)
ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8)
void __ovld vstore_half(float data, size_t offset, half *p)
The float value given by data is first converted to a half value using the appropriate rounding mode...
int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order)
int __ovld atomic_load(volatile atomic_int *object)
long3 __ovld __cnfn convert_long3_sat_rtn(char3)
ushort4 __ovld __cnfn convert_ushort4_rtz(char4)
char2 __ovld __cnfn convert_char2_sat_rtp(char2)
float __ovld __cnfn half_exp(float x)
Compute the base- e exponential of x.
int __ovld __cnfn isnormal(float)
Test for a normal value.
void __ovld vstore_half8_rte(float8 data, size_t offset, half *p)
long4 __ovld __cnfn convert_long4_sat_rte(char4)
ushort8 __ovld __cnfn convert_ushort8_rte(char8)
ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8)
int __ovld atomic_fetch_and(volatile atomic_int *object, int operand)
float __ovld __cnfn sinpi(float x)
Compute sin (PI * x).
int16 __ovld __cnfn convert_int16_sat_rtn(char16)
typedef unsigned long ulong
An unsigned 64-bit integer.
float __ovld __cnfn atanh(float)
Hyperbolic arc tangent.
void __ovld wait_group_events(int num_events, event_t *event_list)
Wait for events that identify the async_work_group_copy operations to complete.
size_t __ovld get_local_linear_id(void)
short4 __ovld __cnfn convert_short4_rte(char4)
char4 __ovld __cnfn convert_char4_sat_rte(char4)
int __ovld atomic_sub(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
float __ovld __cnfn half_recip(float x)
Compute reciprocal.
ushort2 __ovld __cnfn convert_ushort2_sat(char2)
ulong __ovld __cnfn convert_ulong(char)
float __ovld __cnfn pow(float x, float y)
Compute x to the power y.
int8 __ovld __cnfn convert_int8_sat_rtn(char8)
void __ovld release_event(clk_event_t)
char __ovld __cnfn convert_char_rtn(char)
ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16)
int __ovld __cnfn convert_int_rtp(char)
float16 __ovld __cnfn convert_float16_rte(char16)
int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order)
void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p)
short __ovld __cnfn convert_short_sat_rtp(char)
float8 __ovld __cnfn convert_float8_rtn(char8)
void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p)
uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16)
int __ovld atomic_max(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
uint8 __ovld __cnfn convert_uint8_sat_rtn(char8)
float3 __ovld vloada_half3(size_t offset, const __constant half *p)
float __ovld __cnfn convert_float(char)
char2 __ovld __cnfn convert_char2_sat_rtn(char2)