13 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 14 #ifndef cl_khr_depth_images 15 #define cl_khr_depth_images 16 #endif //cl_khr_depth_images 17 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 19 #if __OPENCL_C_VERSION__ < CL_VERSION_2_0 20 #ifdef cl_khr_3d_image_writes 21 #pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable 22 #endif //cl_khr_3d_image_writes 23 #endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0 25 #define __ovld __attribute__((overloadable)) 26 #define __conv __attribute__((convergent)) 29 #define __purefn __attribute__((pure)) 30 #define __cnfn __attribute__((const)) 131 #pragma OPENCL EXTENSION cl_khr_fp16 : enable 139 #if __OPENCL_C_VERSION__ < CL_VERSION_1_2 140 #pragma OPENCL EXTENSION cl_khr_fp64 : enable 146 typedef double double16
__attribute__((ext_vector_type(16)));
149 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 150 #define NULL ((void*)0) 157 #define MAXFLOAT 0x1.fffffep127f 164 #define HUGE_VALF (__builtin_huge_valf()) 171 #define HUGE_VAL (__builtin_huge_val()) 177 #define INFINITY (__builtin_inff()) 182 #define NAN as_float(INT_MAX) 184 #define FP_ILOGB0 INT_MIN 185 #define FP_ILOGBNAN INT_MAX 188 #define FLT_MANT_DIG 24 189 #define FLT_MAX_10_EXP +38 190 #define FLT_MAX_EXP +128 191 #define FLT_MIN_10_EXP -37 192 #define FLT_MIN_EXP -125 194 #define FLT_MAX 0x1.fffffep127f 195 #define FLT_MIN 0x1.0p-126f 196 #define FLT_EPSILON 0x1.0p-23f 198 #define M_E_F 2.71828182845904523536028747135266250f 199 #define M_LOG2E_F 1.44269504088896340735992468100189214f 200 #define M_LOG10E_F 0.434294481903251827651128918916605082f 201 #define M_LN2_F 0.693147180559945309417232121458176568f 202 #define M_LN10_F 2.30258509299404568401799145468436421f 203 #define M_PI_F 3.14159265358979323846264338327950288f 204 #define M_PI_2_F 1.57079632679489661923132169163975144f 205 #define M_PI_4_F 0.785398163397448309615660845819875721f 206 #define M_1_PI_F 0.318309886183790671537767526745028724f 207 #define M_2_PI_F 0.636619772367581343075535053490057448f 208 #define M_2_SQRTPI_F 1.12837916709551257389615890312154517f 209 #define M_SQRT2_F 1.41421356237309504880168872420969808f 210 #define M_SQRT1_2_F 0.707106781186547524400844362104849039f 213 #define DBL_MANT_DIG 53 214 #define DBL_MAX_10_EXP +308 215 #define DBL_MAX_EXP +1024 216 #define DBL_MIN_10_EXP -307 217 #define DBL_MIN_EXP -1021 219 #define DBL_MAX 0x1.fffffffffffffp1023 220 #define DBL_MIN 0x1.0p-1022 221 #define DBL_EPSILON 0x1.0p-52 223 #define M_E 0x1.5bf0a8b145769p+1 224 #define M_LOG2E 0x1.71547652b82fep+0 225 #define M_LOG10E 0x1.bcb7b1526e50ep-2 226 #define M_LN2 0x1.62e42fefa39efp-1 227 #define M_LN10 0x1.26bb1bbb55516p+1 228 #define M_PI 0x1.921fb54442d18p+1 229 #define M_PI_2 0x1.921fb54442d18p+0 230 #define M_PI_4 0x1.921fb54442d18p-1 231 #define M_1_PI 0x1.45f306dc9c883p-2 
232 #define M_2_PI 0x1.45f306dc9c883p-1 233 #define M_2_SQRTPI 0x1.20dd750429b6dp+0 234 #define M_SQRT2 0x1.6a09e667f3bcdp+0 235 #define M_SQRT1_2 0x1.6a09e667f3bcdp-1 240 #define HALF_MANT_DIG 11 241 #define HALF_MAX_10_EXP +4 242 #define HALF_MAX_EXP +16 243 #define HALF_MIN_10_EXP -4 244 #define HALF_MIN_EXP -13 246 #define HALF_MAX ((0x1.ffcp15h)) 247 #define HALF_MIN ((0x1.0p-14h)) 248 #define HALF_EPSILON ((0x1.0p-10h)) 250 #define M_E_H 2.71828182845904523536028747135266250h 251 #define M_LOG2E_H 1.44269504088896340735992468100189214h 252 #define M_LOG10E_H 0.434294481903251827651128918916605082h 253 #define M_LN2_H 0.693147180559945309417232121458176568h 254 #define M_LN10_H 2.30258509299404568401799145468436421h 255 #define M_PI_H 3.14159265358979323846264338327950288h 256 #define M_PI_2_H 1.57079632679489661923132169163975144h 257 #define M_PI_4_H 0.785398163397448309615660845819875721h 258 #define M_1_PI_H 0.318309886183790671537767526745028724h 259 #define M_2_PI_H 0.636619772367581343075535053490057448h 260 #define M_2_SQRTPI_H 1.12837916709551257389615890312154517h 261 #define M_SQRT2_H 1.41421356237309504880168872420969808h 262 #define M_SQRT1_2_H 0.707106781186547524400844362104849039h 267 #define SCHAR_MAX 127 268 #define SCHAR_MIN (-128) 269 #define UCHAR_MAX 255 270 #define CHAR_MAX SCHAR_MAX 271 #define CHAR_MIN SCHAR_MIN 272 #define USHRT_MAX 65535 273 #define SHRT_MAX 32767 274 #define SHRT_MIN (-32768) 275 #define UINT_MAX 0xffffffff 276 #define INT_MAX 2147483647 277 #define INT_MIN (-2147483647-1) 278 #define ULONG_MAX 0xffffffffffffffffUL 279 #define LONG_MAX 0x7fffffffffffffffL 280 #define LONG_MIN (-0x7fffffffffffffffL-1) 5698 #endif //cl_khr_fp64 6579 #endif //cl_khr_fp64 6581 #endif // cl_khr_fp16 6587 #define as_char(x) __builtin_astype((x), char) 6588 #define as_char2(x) __builtin_astype((x), char2) 6589 #define as_char3(x) __builtin_astype((x), char3) 6590 #define as_char4(x) __builtin_astype((x), char4) 6591 #define as_char8(x) 
__builtin_astype((x), char8) 6592 #define as_char16(x) __builtin_astype((x), char16) 6594 #define as_uchar(x) __builtin_astype((x), uchar) 6595 #define as_uchar2(x) __builtin_astype((x), uchar2) 6596 #define as_uchar3(x) __builtin_astype((x), uchar3) 6597 #define as_uchar4(x) __builtin_astype((x), uchar4) 6598 #define as_uchar8(x) __builtin_astype((x), uchar8) 6599 #define as_uchar16(x) __builtin_astype((x), uchar16) 6601 #define as_short(x) __builtin_astype((x), short) 6602 #define as_short2(x) __builtin_astype((x), short2) 6603 #define as_short3(x) __builtin_astype((x), short3) 6604 #define as_short4(x) __builtin_astype((x), short4) 6605 #define as_short8(x) __builtin_astype((x), short8) 6606 #define as_short16(x) __builtin_astype((x), short16) 6608 #define as_ushort(x) __builtin_astype((x), ushort) 6609 #define as_ushort2(x) __builtin_astype((x), ushort2) 6610 #define as_ushort3(x) __builtin_astype((x), ushort3) 6611 #define as_ushort4(x) __builtin_astype((x), ushort4) 6612 #define as_ushort8(x) __builtin_astype((x), ushort8) 6613 #define as_ushort16(x) __builtin_astype((x), ushort16) 6615 #define as_int(x) __builtin_astype((x), int) 6616 #define as_int2(x) __builtin_astype((x), int2) 6617 #define as_int3(x) __builtin_astype((x), int3) 6618 #define as_int4(x) __builtin_astype((x), int4) 6619 #define as_int8(x) __builtin_astype((x), int8) 6620 #define as_int16(x) __builtin_astype((x), int16) 6622 #define as_uint(x) __builtin_astype((x), uint) 6623 #define as_uint2(x) __builtin_astype((x), uint2) 6624 #define as_uint3(x) __builtin_astype((x), uint3) 6625 #define as_uint4(x) __builtin_astype((x), uint4) 6626 #define as_uint8(x) __builtin_astype((x), uint8) 6627 #define as_uint16(x) __builtin_astype((x), uint16) 6629 #define as_long(x) __builtin_astype((x), long) 6630 #define as_long2(x) __builtin_astype((x), long2) 6631 #define as_long3(x) __builtin_astype((x), long3) 6632 #define as_long4(x) __builtin_astype((x), long4) 6633 #define as_long8(x) 
__builtin_astype((x), long8) 6634 #define as_long16(x) __builtin_astype((x), long16) 6636 #define as_ulong(x) __builtin_astype((x), ulong) 6637 #define as_ulong2(x) __builtin_astype((x), ulong2) 6638 #define as_ulong3(x) __builtin_astype((x), ulong3) 6639 #define as_ulong4(x) __builtin_astype((x), ulong4) 6640 #define as_ulong8(x) __builtin_astype((x), ulong8) 6641 #define as_ulong16(x) __builtin_astype((x), ulong16) 6643 #define as_float(x) __builtin_astype((x), float) 6644 #define as_float2(x) __builtin_astype((x), float2) 6645 #define as_float3(x) __builtin_astype((x), float3) 6646 #define as_float4(x) __builtin_astype((x), float4) 6647 #define as_float8(x) __builtin_astype((x), float8) 6648 #define as_float16(x) __builtin_astype((x), float16) 6651 #define as_double(x) __builtin_astype((x), double) 6652 #define as_double2(x) __builtin_astype((x), double2) 6653 #define as_double3(x) __builtin_astype((x), double3) 6654 #define as_double4(x) __builtin_astype((x), double4) 6655 #define as_double8(x) __builtin_astype((x), double8) 6656 #define as_double16(x) __builtin_astype((x), double16) 6657 #endif //cl_khr_fp64 6660 #define as_half(x) __builtin_astype((x), half) 6661 #define as_half2(x) __builtin_astype((x), half2) 6662 #define as_half3(x) __builtin_astype((x), half3) 6663 #define as_half4(x) __builtin_astype((x), half4) 6664 #define as_half8(x) __builtin_astype((x), half8) 6665 #define as_half16(x) __builtin_astype((x), half16) 6666 #endif //cl_khr_fp16 6670 #define __kernel_exec(X, typen) __kernel \ 6671 __attribute__((work_group_size_hint(X, 1, 1))) \ 6672 __attribute__((vec_type_hint(typen))) 6674 #define kernel_exec(X, typen) __kernel \ 6675 __attribute__((work_group_size_hint(X, 1, 1))) \ 6676 __attribute__((vec_type_hint(typen))) 6763 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 6767 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 6787 #endif //cl_khr_fp64 6795 #endif //cl_khr_fp16 6813 #endif //cl_khr_fp64 6821 #endif //cl_khr_fp16 6839 #endif //cl_khr_fp64 
// NOTE(review): extraction-mangled span — the bare numbers (6847, 6865, ...)
// are original source line numbers fused into the text, and the #endif run
// closes cl_khr_fp16/cl_khr_fp64 conditionals whose openings are outside this
// chunk. Left byte-identical; only this comment added. The tail opens the
// OpenCL C >= 2.0 section and declares the generic-address-space double16
// overload of fract().
6847 #endif //cl_khr_fp16 6865 #endif //cl_khr_fp64 6873 #endif //cl_khr_fp16 6891 #endif //cl_khr_fp64 6899 #endif //cl_khr_fp16 6917 #endif //cl_khr_fp64 6925 #endif //cl_khr_fp16 6943 #endif //cl_khr_fp64 6951 #endif //cl_khr_fp16 6969 #endif //cl_khr_fp64 6977 #endif //cl_khr_fp16 6995 #endif //cl_khr_fp64 7003 #endif //cl_khr_fp16 7021 #endif //cl_khr_fp64 7029 #endif //cl_khr_fp16 7047 #endif //cl_khr_fp64 7055 #endif //cl_khr_fp16 7073 #endif //cl_khr_fp64 7081 #endif //cl_khr_fp16 7100 #endif //cl_khr_fp64 7108 #endif //cl_khr_fp16 7126 #endif //cl_khr_fp64 7134 #endif //cl_khr_fp16 7152 #endif //cl_khr_fp64 7160 #endif //cl_khr_fp16 7178 #endif //cl_khr_fp64 7186 #endif //cl_khr_fp16 7204 #endif //cl_khr_fp64 7212 #endif //cl_khr_fp16 7230 #endif //cl_khr_fp64 7238 #endif //cl_khr_fp16 7257 #endif //cl_khr_fp64 7265 #endif //cl_khr_fp16 7283 #endif //cl_khr_fp64 7291 #endif //cl_khr_fp16 7309 #endif //cl_khr_fp64 7317 #endif //cl_khr_fp16 7335 #endif //cl_khr_fp64 7343 #endif //cl_khr_fp16 7361 #endif //cl_khr_fp64 7369 #endif //cl_khr_fp16 7387 #endif //cl_khr_fp64 7395 #endif //cl_khr_fp16 7413 #endif //cl_khr_fp64 7421 #endif //cl_khr_fp16 7440 #endif //cl_khr_fp64 7448 #endif //cl_khr_fp16 7470 #endif //cl_khr_fp64 7478 #endif //cl_khr_fp16 7509 #endif //cl_khr_fp64 7522 #endif //cl_khr_fp16 7553 #endif //cl_khr_fp64 7566 #endif //cl_khr_fp16 7584 #endif //cl_khr_fp64 7592 #endif //cl_khr_fp16 7598 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 7611 double16
__ovld fract(double16 x, double16 *iptr);
7612 #endif //cl_khr_fp64 7620 #endif //cl_khr_fp16 7622 float __ovld fract(
float x, __global
float *iptr);
7623 float2
__ovld fract(float2 x, __global float2 *iptr);
7624 float3
__ovld fract(float3 x, __global float3 *iptr);
7625 float4
__ovld fract(float4 x, __global float4 *iptr);
7626 float8
__ovld fract(float8 x, __global float8 *iptr);
7627 float16
__ovld fract(float16 x, __global float16 *iptr);
7629 float2
__ovld fract(float2 x, __local float2 *iptr);
7630 float3
__ovld fract(float3 x, __local float3 *iptr);
7631 float4
__ovld fract(float4 x, __local float4 *iptr);
7632 float8
__ovld fract(float8 x, __local float8 *iptr);
7633 float16
__ovld fract(float16 x, __local float16 *iptr);
7634 float __ovld fract(
float x, __private
float *iptr);
7635 float2
__ovld fract(float2 x, __private float2 *iptr);
7636 float3
__ovld fract(float3 x, __private float3 *iptr);
7637 float4
__ovld fract(float4 x, __private float4 *iptr);
7638 float8
__ovld fract(float8 x, __private float8 *iptr);
7639 float16
__ovld fract(float16 x, __private float16 *iptr);
7641 double __ovld fract(
double x, __global
double *iptr);
7642 double2
__ovld fract(double2 x, __global double2 *iptr);
7643 double3
__ovld fract(double3 x, __global double3 *iptr);
7644 double4
__ovld fract(double4 x, __global double4 *iptr);
7645 double8
__ovld fract(double8 x, __global double8 *iptr);
7646 double16
__ovld fract(double16 x, __global double16 *iptr);
7647 double __ovld fract(
double x, __local
double *iptr);
7648 double2
__ovld fract(double2 x, __local double2 *iptr);
7649 double3
__ovld fract(double3 x, __local double3 *iptr);
7650 double4
__ovld fract(double4 x, __local double4 *iptr);
7651 double8
__ovld fract(double8 x, __local double8 *iptr);
7652 double16
__ovld fract(double16 x, __local double16 *iptr);
7653 double __ovld fract(
double x, __private
double *iptr);
7654 double2
__ovld fract(double2 x, __private double2 *iptr);
7655 double3
__ovld fract(double3 x, __private double3 *iptr);
7656 double4
__ovld fract(double4 x, __private double4 *iptr);
7657 double8
__ovld fract(double8 x, __private double8 *iptr);
7658 double16
__ovld fract(double16 x, __private double16 *iptr);
7659 #endif //cl_khr_fp64 7662 half2
__ovld fract(half2 x, __global half2 *iptr);
7663 half3
__ovld fract(half3 x, __global half3 *iptr);
7664 half4
__ovld fract(half4 x, __global half4 *iptr);
7665 half8
__ovld fract(half8 x, __global half8 *iptr);
7666 half16
__ovld fract(half16 x, __global half16 *iptr);
7672 half16
__ovld fract(half16 x, __local half16 *iptr);
7674 half2
__ovld fract(half2 x, __private half2 *iptr);
7675 half3
__ovld fract(half3 x, __private half3 *iptr);
7676 half4
__ovld fract(half4 x, __private half4 *iptr);
7677 half8
__ovld fract(half8 x, __private half8 *iptr);
7678 half16
__ovld fract(half16 x, __private half16 *iptr);
// NOTE(review): extraction-mangled span left byte-identical (bare numbers are
// fused original line numbers; the #endif run closes out-of-view conditional
// sections). The tail reopens the OpenCL C >= 2.0 section and declares the
// generic-address-space float16 overload of modf().
7679 #endif //cl_khr_fp16 7680 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 7688 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 7702 #endif //cl_khr_fp64 7710 #endif //cl_khr_fp16 7749 #endif //cl_khr_fp64 7769 #endif //cl_khr_fp16 7770 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 7789 #endif //cl_khr_fp64 7797 #endif //cl_khr_fp16 7815 #endif //cl_khr_fp64 7823 #endif //cl_khr_fp16 7851 #endif //cl_khr_fp64 7864 #endif //cl_khr_fp16 7885 #endif //cl_khr_fp64 7893 #endif //cl_khr_fp16 7895 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 7909 #endif //cl_khr_fp64 7917 #endif //cl_khr_fp16 7956 #endif //cl_khr_fp64 7976 #endif //cl_khr_fp16 7977 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 7995 #endif //cl_khr_fp64 8003 #endif //cl_khr_fp16 8021 #endif //cl_khr_fp64 8029 #endif //cl_khr_fp16 8047 #endif //cl_khr_fp64 8055 #endif //cl_khr_fp16 8073 #endif //cl_khr_fp64 8081 #endif //cl_khr_fp16 8100 #endif //cl_khr_fp64 8108 #endif //cl_khr_fp16 8130 #endif //cl_khr_fp64 8138 #endif //cl_khr_fp16 8157 #endif //cl_khr_fp64 8165 #endif //cl_khr_fp16 8184 #endif //cl_khr_fp64 8192 #endif //cl_khr_fp16 8201 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 8207 float16
__ovld modf(float16 x, float16 *iptr);
8210 double2
__ovld modf(double2 x, double2 *iptr);
8211 double3
__ovld modf(double3 x, double3 *iptr);
8212 double4
__ovld modf(double4 x, double4 *iptr);
8213 double8
__ovld modf(double8 x, double8 *iptr);
8214 double16
__ovld modf(double16 x, double16 *iptr);
8215 #endif //cl_khr_fp64 8223 #endif //cl_khr_fp16 8225 float __ovld modf(
float x, __global
float *iptr);
8226 float2
__ovld modf(float2 x, __global float2 *iptr);
8227 float3
__ovld modf(float3 x, __global float3 *iptr);
8228 float4
__ovld modf(float4 x, __global float4 *iptr);
8229 float8
__ovld modf(float8 x, __global float8 *iptr);
8230 float16
__ovld modf(float16 x, __global float16 *iptr);
8231 float __ovld modf(
float x, __local
float *iptr);
8232 float2
__ovld modf(float2 x, __local float2 *iptr);
8233 float3
__ovld modf(float3 x, __local float3 *iptr);
8234 float4
__ovld modf(float4 x, __local float4 *iptr);
8235 float8
__ovld modf(float8 x, __local float8 *iptr);
8236 float16
__ovld modf(float16 x, __local float16 *iptr);
8237 float __ovld modf(
float x, __private
float *iptr);
8238 float2
__ovld modf(float2 x, __private float2 *iptr);
8239 float3
__ovld modf(float3 x, __private float3 *iptr);
8240 float4
__ovld modf(float4 x, __private float4 *iptr);
8241 float8
__ovld modf(float8 x, __private float8 *iptr);
8242 float16
__ovld modf(float16 x, __private float16 *iptr);
8244 double __ovld modf(
double x, __global
double *iptr);
8245 double2
__ovld modf(double2 x, __global double2 *iptr);
8246 double3
__ovld modf(double3 x, __global double3 *iptr);
8247 double4
__ovld modf(double4 x, __global double4 *iptr);
8248 double8
__ovld modf(double8 x, __global double8 *iptr);
8249 double16
__ovld modf(double16 x, __global double16 *iptr);
8250 double __ovld modf(
double x, __local
double *iptr);
8251 double2
__ovld modf(double2 x, __local double2 *iptr);
8252 double3
__ovld modf(double3 x, __local double3 *iptr);
8253 double4
__ovld modf(double4 x, __local double4 *iptr);
8254 double8
__ovld modf(double8 x, __local double8 *iptr);
8255 double16
__ovld modf(double16 x, __local double16 *iptr);
8256 double __ovld modf(
double x, __private
double *iptr);
8257 double2
__ovld modf(double2 x, __private double2 *iptr);
8258 double3
__ovld modf(double3 x, __private double3 *iptr);
8259 double4
__ovld modf(double4 x, __private double4 *iptr);
8260 double8
__ovld modf(double8 x, __private double8 *iptr);
8261 double16
__ovld modf(double16 x, __private double16 *iptr);
8262 #endif //cl_khr_fp64 8264 half
__ovld modf(half x, __global half *iptr);
8265 half2
__ovld modf(half2 x, __global half2 *iptr);
8266 half3
__ovld modf(half3 x, __global half3 *iptr);
8267 half4
__ovld modf(half4 x, __global half4 *iptr);
8268 half8
__ovld modf(half8 x, __global half8 *iptr);
8269 half16
__ovld modf(half16 x, __global half16 *iptr);
8270 half
__ovld modf(half x, __local half *iptr);
8271 half2
__ovld modf(half2 x, __local half2 *iptr);
8272 half3
__ovld modf(half3 x, __local half3 *iptr);
8273 half4
__ovld modf(half4 x, __local half4 *iptr);
8274 half8
__ovld modf(half8 x, __local half8 *iptr);
8275 half16
__ovld modf(half16 x, __local half16 *iptr);
8276 half
__ovld modf(half x, __private half *iptr);
8277 half2
__ovld modf(half2 x, __private half2 *iptr);
8278 half3
__ovld modf(half3 x, __private half3 *iptr);
8279 half4
__ovld modf(half4 x, __private half4 *iptr);
8280 half8
__ovld modf(half8 x, __private half8 *iptr);
8281 half16
__ovld modf(half16 x, __private half16 *iptr);
// NOTE(review): extraction-mangled span left byte-identical (bare numbers are
// fused original line numbers; the #endif run closes out-of-view conditional
// sections). The tail reopens the OpenCL C >= 2.0 section and declares the
// generic-address-space float16 overload of remquo().
8282 #endif //cl_khr_fp16 8283 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 8302 #endif //cl_khr_fp64 8310 #endif //cl_khr_fp16 8332 #endif //cl_khr_fp64 8340 #endif //cl_khr_fp16 8358 #endif //cl_khr_fp64 8366 #endif //cl_khr_fp16 8384 #endif //cl_khr_fp64 8392 #endif //cl_khr_fp16 8410 #endif //cl_khr_fp64 8418 #endif //cl_khr_fp16 8439 #endif //cl_khr_fp64 8447 #endif //cl_khr_fp16 8461 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 8467 float16
__ovld remquo(float16 x, float16 y, int16 *quo);
8470 double2
__ovld remquo(double2 x, double2 y, int2 *quo);
8471 double3
__ovld remquo(double3 x, double3 y, int3 *quo);
8472 double4
__ovld remquo(double4 x, double4 y, int4 *quo);
8473 double8
__ovld remquo(double8 x, double8 y, int8 *quo);
8474 double16
__ovld remquo(double16 x, double16 y, int16 *quo);
8475 #endif //cl_khr_fp64 8484 #endif //cl_khr_fp16 8486 float __ovld remquo(
float x,
float y, __global
int *quo);
8487 float2
__ovld remquo(float2 x, float2 y, __global int2 *quo);
8488 float3
__ovld remquo(float3 x, float3 y, __global int3 *quo);
8489 float4
__ovld remquo(float4 x, float4 y, __global int4 *quo);
8490 float8
__ovld remquo(float8 x, float8 y, __global int8 *quo);
8491 float16
__ovld remquo(float16 x, float16 y, __global int16 *quo);
8492 float __ovld remquo(
float x,
float y, __local
int *quo);
8493 float2
__ovld remquo(float2 x, float2 y, __local int2 *quo);
8494 float3
__ovld remquo(float3 x, float3 y, __local int3 *quo);
8495 float4
__ovld remquo(float4 x, float4 y, __local int4 *quo);
8496 float8
__ovld remquo(float8 x, float8 y, __local int8 *quo);
8497 float16
__ovld remquo(float16 x, float16 y, __local int16 *quo);
8498 float __ovld remquo(
float x,
float y, __private
int *quo);
8499 float2
__ovld remquo(float2 x, float2 y, __private int2 *quo);
8500 float3
__ovld remquo(float3 x, float3 y, __private int3 *quo);
8501 float4
__ovld remquo(float4 x, float4 y, __private int4 *quo);
8502 float8
__ovld remquo(float8 x, float8 y, __private int8 *quo);
8503 float16
__ovld remquo(float16 x, float16 y, __private int16 *quo);
8505 double __ovld remquo(
double x,
double y, __global
int *quo);
8506 double2
__ovld remquo(double2 x, double2 y, __global int2 *quo);
8507 double3
__ovld remquo(double3 x, double3 y, __global int3 *quo);
8508 double4
__ovld remquo(double4 x, double4 y, __global int4 *quo);
8509 double8
__ovld remquo(double8 x, double8 y, __global int8 *quo);
8510 double16
__ovld remquo(double16 x, double16 y, __global int16 *quo);
8511 double __ovld remquo(
double x,
double y, __local
int *quo);
8512 double2
__ovld remquo(double2 x, double2 y, __local int2 *quo);
8513 double3
__ovld remquo(double3 x, double3 y, __local int3 *quo);
8514 double4
__ovld remquo(double4 x, double4 y, __local int4 *quo);
8515 double8
__ovld remquo(double8 x, double8 y, __local int8 *quo);
8516 double16
__ovld remquo(double16 x, double16 y, __local int16 *quo);
8517 double __ovld remquo(
double x,
double y, __private
int *quo);
8518 double2
__ovld remquo(double2 x, double2 y, __private int2 *quo);
8519 double3
__ovld remquo(double3 x, double3 y, __private int3 *quo);
8520 double4
__ovld remquo(double4 x, double4 y, __private int4 *quo);
8521 double8
__ovld remquo(double8 x, double8 y, __private int8 *quo);
8522 double16
__ovld remquo(double16 x, double16 y, __private int16 *quo);
8523 #endif //cl_khr_fp64 8526 half2
__ovld remquo(half2 x, half2 y, __global int2 *quo);
8527 half3
__ovld remquo(half3 x, half3 y, __global int3 *quo);
8528 half4
__ovld remquo(half4 x, half4 y, __global int4 *quo);
8529 half8
__ovld remquo(half8 x, half8 y, __global int8 *quo);
8530 half16
__ovld remquo(half16 x, half16 y, __global int16 *quo);
8532 half2
__ovld remquo(half2 x, half2 y, __local int2 *quo);
8533 half3
__ovld remquo(half3 x, half3 y, __local int3 *quo);
8534 half4
__ovld remquo(half4 x, half4 y, __local int4 *quo);
8535 half8
__ovld remquo(half8 x, half8 y, __local int8 *quo);
8536 half16
__ovld remquo(half16 x, half16 y, __local int16 *quo);
8537 half
__ovld remquo(half x, half y, __private
int *quo);
8538 half2
__ovld remquo(half2 x, half2 y, __private int2 *quo);
8539 half3
__ovld remquo(half3 x, half3 y, __private int3 *quo);
8540 half4
__ovld remquo(half4 x, half4 y, __private int4 *quo);
8541 half8
__ovld remquo(half8 x, half8 y, __private int8 *quo);
8542 half16
__ovld remquo(half16 x, half16 y, __private int16 *quo);
// NOTE(review): extraction-mangled span left byte-identical (bare numbers are
// fused original line numbers; the #endif run closes out-of-view conditional
// sections). The tail reopens the OpenCL C >= 2.0 section and declares the
// __global float2 overload of sincos().
8543 #endif //cl_khr_fp16 8544 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 8564 #endif //cl_khr_fp64 8572 #endif //cl_khr_fp16 8590 #endif //cl_khr_fp64 8598 #endif //cl_khr_fp16 8618 #endif //cl_khr_fp64 8626 #endif //cl_khr_fp16 8644 #endif //cl_khr_fp64 8652 #endif //cl_khr_fp16 8670 #endif //cl_khr_fp64 8678 #endif //cl_khr_fp16 8685 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 8699 #endif //cl_khr_fp64 8707 #endif //cl_khr_fp16 8710 float2
__ovld sincos(float2 x, __global float2 *cosval);
8711 float3
__ovld sincos(float3 x, __global float3 *cosval);
8712 float4
__ovld sincos(float4 x, __global float4 *cosval);
8713 float8
__ovld sincos(float8 x, __global float8 *cosval);
8714 float16
__ovld sincos(float16 x, __global float16 *cosval);
8716 float2
__ovld sincos(float2 x, __local float2 *cosval);
8717 float3
__ovld sincos(float3 x, __local float3 *cosval);
8718 float4
__ovld sincos(float4 x, __local float4 *cosval);
8719 float8
__ovld sincos(float8 x, __local float8 *cosval);
8720 float16
__ovld sincos(float16 x, __local float16 *cosval);
8722 float2
__ovld sincos(float2 x, __private float2 *cosval);
8723 float3
__ovld sincos(float3 x, __private float3 *cosval);
8724 float4
__ovld sincos(float4 x, __private float4 *cosval);
8725 float8
__ovld sincos(float8 x, __private float8 *cosval);
8726 float16
__ovld sincos(float16 x, __private float16 *cosval);
8728 double __ovld sincos(
double x, __global
double *cosval);
8729 double2
__ovld sincos(double2 x, __global double2 *cosval);
8730 double3
__ovld sincos(double3 x, __global double3 *cosval);
8731 double4
__ovld sincos(double4 x, __global double4 *cosval);
8732 double8
__ovld sincos(double8 x, __global double8 *cosval);
8733 double16
__ovld sincos(double16 x, __global double16 *cosval);
8734 double __ovld sincos(
double x, __local
double *cosval);
8735 double2
__ovld sincos(double2 x, __local double2 *cosval);
8736 double3
__ovld sincos(double3 x, __local double3 *cosval);
8737 double4
__ovld sincos(double4 x, __local double4 *cosval);
8738 double8
__ovld sincos(double8 x, __local double8 *cosval);
8739 double16
__ovld sincos(double16 x, __local double16 *cosval);
8740 double __ovld sincos(
double x, __private
double *cosval);
8741 double2
__ovld sincos(double2 x, __private double2 *cosval);
8742 double3
__ovld sincos(double3 x, __private double3 *cosval);
8743 double4
__ovld sincos(double4 x, __private double4 *cosval);
8744 double8
__ovld sincos(double8 x, __private double8 *cosval);
8745 double16
__ovld sincos(double16 x, __private double16 *cosval);
8746 #endif //cl_khr_fp64 8753 half16
__ovld sincos(half16 x, __global half16 *cosval);
8759 half16
__ovld sincos(half16 x, __local half16 *cosval);
8765 half16
__ovld sincos(half16 x, __private half16 *cosval);
// NOTE(review): extraction-mangled span left byte-identical (bare numbers are
// fused original line numbers; the long #endif run closes out-of-view
// conditional sections, including one #if/#endif pair at 9700/9749 whose
// interior is not in view). The trailing "char2" is the return type of the
// first __constant vload2 overload, declared on the following lines.
8766 #endif //cl_khr_fp16 8767 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 8785 #endif //cl_khr_fp64 8793 #endif //cl_khr_fp16 8811 #endif //cl_khr_fp64 8819 #endif //cl_khr_fp16 8837 #endif //cl_khr_fp64 8845 #endif //cl_khr_fp16 8863 #endif //cl_khr_fp64 8871 #endif //cl_khr_fp16 8889 #endif //cl_khr_fp64 8897 #endif //cl_khr_fp16 8915 #endif //cl_khr_fp64 8923 #endif //cl_khr_fp16 8941 #endif //cl_khr_fp64 8949 #endif //cl_khr_fp16 8968 #endif //cl_khr_fp64 8976 #endif //cl_khr_fp16 9700 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 9749 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 10390 #endif //cl_khr_fp64 10403 #endif //cl_khr_fp16 10422 #endif //cl_khr_fp64 10430 #endif //cl_khr_fp16 10459 #endif //cl_khr_fp64 10472 #endif //cl_khr_fp16 10501 #endif //cl_khr_fp64 10514 #endif //cl_khr_fp16 10546 #endif //cl_khr_fp64 10559 #endif //cl_khr_fp16 10578 #endif //cl_khr_fp64 10586 #endif //cl_khr_fp16 10614 #endif //cl_khr_fp64 10628 #endif //cl_khr_fp16 10666 #endif //cl_khr_fp64 10680 #endif //cl_khr_fp16 10699 #endif //cl_khr_fp64 10707 #endif //cl_khr_fp16 10720 #endif //cl_khr_fp64 10724 #endif //cl_khr_fp16 10738 #endif //cl_khr_fp64 10744 #endif //cl_khr_fp16 10759 #endif //cl_khr_fp64 10765 #endif //cl_khr_fp16 10780 #endif //cl_khr_fp64 10786 #endif //cl_khr_fp16 10801 #endif //cl_khr_fp64 10807 #endif //cl_khr_fp16 10821 #endif //cl_khr_fp16 10836 #endif //cl_khr_fp16 10868 #endif //cl_khr_fp16 10889 #endif //cl_khr_fp64 10897 #endif //cl_khr_fp16 10915 #endif //cl_khr_fp64 10923 #endif //cl_khr_fp16 10941 #endif //cl_khr_fp64 10949 #endif //cl_khr_fp16 10967 #endif //cl_khr_fp64 10975 #endif //cl_khr_fp16 10993 #endif //cl_khr_fp64 11001 #endif //cl_khr_fp16 11019 #endif //cl_khr_fp64 11027 #endif //cl_khr_fp16 11046 #endif //cl_khr_fp64 11054 #endif //cl_khr_fp16 11072 #endif //cl_khr_fp64 11080 #endif //cl_khr_fp16 11098 #endif //cl_khr_fp64 11106 #endif //cl_khr_fp16 11124 #endif //cl_khr_fp64 11132 #endif //cl_khr_fp16 11150 #endif //cl_khr_fp64 
11158 #endif //cl_khr_fp16 11178 #endif //cl_khr_fp64 11186 #endif //cl_khr_fp16 11206 #endif //cl_khr_fp64 11214 #endif //cl_khr_fp16 11236 #endif //cl_khr_fp64 11244 #endif //cl_khr_fp16 11370 #endif //cl_khr_fp64 11378 #endif //cl_khr_fp16 11516 #endif //cl_khr_fp64 11530 #endif //cl_khr_fp16 11550 char2
__ovld vload2(
size_t offset,
const __constant
char *p);
11552 short2
__ovld vload2(
size_t offset,
const __constant
short *p);
11554 int2
__ovld vload2(
size_t offset,
const __constant
int *p);
11556 long2
__ovld vload2(
size_t offset,
const __constant
long *p);
11558 float2
__ovld vload2(
size_t offset,
const __constant
float *p);
11559 char3
__ovld vload3(
size_t offset,
const __constant
char *p);
11561 short3
__ovld vload3(
size_t offset,
const __constant
short *p);
11563 int3
__ovld vload3(
size_t offset,
const __constant
int *p);
11565 long3
__ovld vload3(
size_t offset,
const __constant
long *p);
11567 float3
__ovld vload3(
size_t offset,
const __constant
float *p);
11568 char4
__ovld vload4(
size_t offset,
const __constant
char *p);
11570 short4
__ovld vload4(
size_t offset,
const __constant
short *p);
11572 int4
__ovld vload4(
size_t offset,
const __constant
int *p);
11574 long4
__ovld vload4(
size_t offset,
const __constant
long *p);
11576 float4
__ovld vload4(
size_t offset,
const __constant
float *p);
11577 char8
__ovld vload8(
size_t offset,
const __constant
char *p);
11579 short8
__ovld vload8(
size_t offset,
const __constant
short *p);
11581 int8
__ovld vload8(
size_t offset,
const __constant
int *p);
11583 long8
__ovld vload8(
size_t offset,
const __constant
long *p);
11585 float8
__ovld vload8(
size_t offset,
const __constant
float *p);
11586 char16
__ovld vload16(
size_t offset,
const __constant
char *p);
11588 short16
__ovld vload16(
size_t offset,
const __constant
short *p);
11590 int16
__ovld vload16(
size_t offset,
const __constant
int *p);
11592 long16
__ovld vload16(
size_t offset,
const __constant
long *p);
11594 float16
__ovld vload16(
size_t offset,
const __constant
float *p);
11596 double2
__ovld vload2(
size_t offset,
const __constant
double *p);
11597 double3
__ovld vload3(
size_t offset,
const __constant
double *p);
11598 double4
__ovld vload4(
size_t offset,
const __constant
double *p);
11599 double8
__ovld vload8(
size_t offset,
const __constant
double *p);
11600 double16
__ovld vload16(
size_t offset,
const __constant
double *p);
11601 #endif //cl_khr_fp64 11604 half
__ovld vload(
size_t offset,
const __constant half *p);
11605 half2
__ovld vload2(
size_t offset,
const __constant half *p);
11606 half3
__ovld vload3(
size_t offset,
const __constant half *p);
11607 half4
__ovld vload4(
size_t offset,
const __constant half *p);
11608 half8
__ovld vload8(
size_t offset,
const __constant half *p);
11609 half16
__ovld vload16(
size_t offset,
const __constant half *p);
11610 #endif //cl_khr_fp16 11612 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 11660 double2
__ovld vload2(
size_t offset,
const double *p);
11661 double3
__ovld vload3(
size_t offset,
const double *p);
/*
 * vloadN overload declarations (OpenCL C builtin header).
 *
 * NOTE(review): this region is a mangled extraction — several original
 * header lines are fused onto one physical line and the original header
 * line numbers (e.g. "11662") are embedded in the text. Gaps in those
 * numbers mark elided declarations (the ucharN/ushortN/uintN/ulongN
 * overloads, among others, fall in elided ranges); do not treat the
 * gaps as intentional omissions. TODO: restore from a pristine header.
 *
 * Each declaration below is an overload of vloadN(size_t offset,
 * const <addrspace> T *p) returning TN; per the OpenCL C spec these
 * read N elements starting at (p + offset * N) — not shown here,
 * only the prototypes are. The first group (no address-space
 * qualifier) is the generic-address-space form gated on
 * __OPENCL_C_VERSION__ >= CL_VERSION_2_0 (opened before this view).
 */
11662 double4
__ovld vload4(
size_t offset,
const double *p);
11663 double8
__ovld vload8(
size_t offset,
const double *p);
11665 #endif //cl_khr_fp64 11668 half
__ovld vload(
size_t offset,
const half *p);
// --- __global address space: char/short/int/long/float, widths 2/3/4/8/16.
11674 #endif //cl_khr_fp16 11676 char2
__ovld vload2(
size_t offset,
const __global
char *p);
11678 short2
__ovld vload2(
size_t offset,
const __global
short *p);
11680 int2
__ovld vload2(
size_t offset,
const __global
int *p);
11682 long2
__ovld vload2(
size_t offset,
const __global
long *p);
11684 float2
__ovld vload2(
size_t offset,
const __global
float *p);
11685 char3
__ovld vload3(
size_t offset,
const __global
char *p);
11687 short3
__ovld vload3(
size_t offset,
const __global
short *p);
11689 int3
__ovld vload3(
size_t offset,
const __global
int *p);
11691 long3
__ovld vload3(
size_t offset,
const __global
long *p);
11693 float3
__ovld vload3(
size_t offset,
const __global
float *p);
11694 char4
__ovld vload4(
size_t offset,
const __global
char *p);
11696 short4
__ovld vload4(
size_t offset,
const __global
short *p);
11698 int4
__ovld vload4(
size_t offset,
const __global
int *p);
11700 long4
__ovld vload4(
size_t offset,
const __global
long *p);
11702 float4
__ovld vload4(
size_t offset,
const __global
float *p);
11703 char8
__ovld vload8(
size_t offset,
const __global
char *p);
11705 short8
__ovld vload8(
size_t offset,
const __global
short *p);
11707 int8
__ovld vload8(
size_t offset,
const __global
int *p);
11709 long8
__ovld vload8(
size_t offset,
const __global
long *p);
11711 float8
__ovld vload8(
size_t offset,
const __global
float *p);
11712 char16
__ovld vload16(
size_t offset,
const __global
char *p);
11714 short16
__ovld vload16(
size_t offset,
const __global
short *p);
11716 int16
__ovld vload16(
size_t offset,
const __global
int *p);
11718 long16
__ovld vload16(
size_t offset,
const __global
long *p);
11720 float16
__ovld vload16(
size_t offset,
const __global
float *p);
// --- __local address space. NOTE(review): the int16 vload16 overload
// (original line ~11761) falls in an elided range of this dump; it is
// present in the real header — verify before assuming it is missing.
11721 char2
__ovld vload2(
size_t offset,
const __local
char *p);
11723 short2
__ovld vload2(
size_t offset,
const __local
short *p);
11725 int2
__ovld vload2(
size_t offset,
const __local
int *p);
11727 long2
__ovld vload2(
size_t offset,
const __local
long *p);
11729 float2
__ovld vload2(
size_t offset,
const __local
float *p);
11730 char3
__ovld vload3(
size_t offset,
const __local
char *p);
11732 short3
__ovld vload3(
size_t offset,
const __local
short *p);
11734 int3
__ovld vload3(
size_t offset,
const __local
int *p);
11736 long3
__ovld vload3(
size_t offset,
const __local
long *p);
11738 float3
__ovld vload3(
size_t offset,
const __local
float *p);
11739 char4
__ovld vload4(
size_t offset,
const __local
char *p);
11741 short4
__ovld vload4(
size_t offset,
const __local
short *p);
11743 int4
__ovld vload4(
size_t offset,
const __local
int *p);
11745 long4
__ovld vload4(
size_t offset,
const __local
long *p);
11747 float4
__ovld vload4(
size_t offset,
const __local
float *p);
11748 char8
__ovld vload8(
size_t offset,
const __local
char *p);
11750 short8
__ovld vload8(
size_t offset,
const __local
short *p);
11752 int8
__ovld vload8(
size_t offset,
const __local
int *p);
11754 long8
__ovld vload8(
size_t offset,
const __local
long *p);
11756 float8
__ovld vload8(
size_t offset,
const __local
float *p);
11757 char16
__ovld vload16(
size_t offset,
const __local
char *p);
11759 short16
__ovld vload16(
size_t offset,
const __local
short *p);
11763 long16
__ovld vload16(
size_t offset,
const __local
long *p);
11765 float16
__ovld vload16(
size_t offset,
const __local
float *p);
// --- __private address space.
11766 char2
__ovld vload2(
size_t offset,
const __private
char *p);
11768 short2
__ovld vload2(
size_t offset,
const __private
short *p);
11770 int2
__ovld vload2(
size_t offset,
const __private
int *p);
11772 long2
__ovld vload2(
size_t offset,
const __private
long *p);
11774 float2
__ovld vload2(
size_t offset,
const __private
float *p);
11775 char3
__ovld vload3(
size_t offset,
const __private
char *p);
11777 short3
__ovld vload3(
size_t offset,
const __private
short *p);
11779 int3
__ovld vload3(
size_t offset,
const __private
int *p);
11781 long3
__ovld vload3(
size_t offset,
const __private
long *p);
11783 float3
__ovld vload3(
size_t offset,
const __private
float *p);
11784 char4
__ovld vload4(
size_t offset,
const __private
char *p);
11786 short4
__ovld vload4(
size_t offset,
const __private
short *p);
11788 int4
__ovld vload4(
size_t offset,
const __private
int *p);
11790 long4
__ovld vload4(
size_t offset,
const __private
long *p);
11792 float4
__ovld vload4(
size_t offset,
const __private
float *p);
11793 char8
__ovld vload8(
size_t offset,
const __private
char *p);
11795 short8
__ovld vload8(
size_t offset,
const __private
short *p);
11797 int8
__ovld vload8(
size_t offset,
const __private
int *p);
11799 long8
__ovld vload8(
size_t offset,
const __private
long *p);
11801 float8
__ovld vload8(
size_t offset,
const __private
float *p);
11802 char16
__ovld vload16(
size_t offset,
const __private
char *p);
11804 short16
__ovld vload16(
size_t offset,
const __private
short *p);
11806 int16
__ovld vload16(
size_t offset,
const __private
int *p);
11808 long16
__ovld vload16(
size_t offset,
const __private
long *p);
11810 float16
__ovld vload16(
size_t offset,
const __private
float *p);
// --- double overloads: inside a cl_khr_fp64 guard (its #if was elided;
// the matching #endif //cl_khr_fp64 is visible below at original 11828).
11813 double2
__ovld vload2(
size_t offset,
const __global
double *p);
11814 double3
__ovld vload3(
size_t offset,
const __global
double *p);
11815 double4
__ovld vload4(
size_t offset,
const __global
double *p);
11816 double8
__ovld vload8(
size_t offset,
const __global
double *p);
11817 double16
__ovld vload16(
size_t offset,
const __global
double *p);
11818 double2
__ovld vload2(
size_t offset,
const __local
double *p);
11819 double3
__ovld vload3(
size_t offset,
const __local
double *p);
11820 double4
__ovld vload4(
size_t offset,
const __local
double *p);
11821 double8
__ovld vload8(
size_t offset,
const __local
double *p);
11822 double16
__ovld vload16(
size_t offset,
const __local
double *p);
11823 double2
__ovld vload2(
size_t offset,
const __private
double *p);
11824 double3
__ovld vload3(
size_t offset,
const __private
double *p);
11825 double4
__ovld vload4(
size_t offset,
const __private
double *p);
11826 double8
__ovld vload8(
size_t offset,
const __private
double *p);
11827 double16
__ovld vload16(
size_t offset,
const __private
double *p);
// --- half overloads: inside a cl_khr_fp16 guard (the matching
// #endif //cl_khr_fp16 appears at original line 11849).
11828 #endif //cl_khr_fp64 11831 half
__ovld vload(
size_t offset,
const __global half *p);
11832 half2
__ovld vload2(
size_t offset,
const __global half *p);
11833 half3
__ovld vload3(
size_t offset,
const __global half *p);
11834 half4
__ovld vload4(
size_t offset,
const __global half *p);
11835 half8
__ovld vload8(
size_t offset,
const __global half *p);
11836 half16
__ovld vload16(
size_t offset,
const __global half *p);
11837 half
__ovld vload(
size_t offset,
const __local half *p);
11838 half2
__ovld vload2(
size_t offset,
const __local half *p);
11839 half3
__ovld vload3(
size_t offset,
const __local half *p);
11840 half4
__ovld vload4(
size_t offset,
const __local half *p);
11841 half8
__ovld vload8(
size_t offset,
const __local half *p);
11842 half16
__ovld vload16(
size_t offset,
const __local half *p);
11843 half
__ovld vload(
size_t offset,
const __private half *p);
11844 half2
__ovld vload2(
size_t offset,
const __private half *p);
11845 half3
__ovld vload3(
size_t offset,
const __private half *p);
11846 half4
__ovld vload4(
size_t offset,
const __private half *p);
11847 half8
__ovld vload8(
size_t offset,
const __private half *p);
11848 half16
__ovld vload16(
size_t offset,
const __private half *p);
/*
 * vstoreN overload declarations: void vstoreN(TN data, size_t offset,
 * <addrspace> T *p). Mirrors the vloadN overload set above: a
 * generic-address-space group (OpenCL >= 2.0), then explicit
 * __global/__local/__private groups, then fp64- and fp16-gated groups.
 *
 * NOTE(review): same extraction mangling as the vload section —
 * original header line numbers are embedded in the text and gaps in
 * them mark elided declarations (e.g. the generic vstore16(double16,…)
 * at ~11903 and all ucharN/ushortN/uintN/ulongN overloads are elided
 * from this dump, not absent from the real header).
 */
11849 #endif //cl_khr_fp16 11850 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 11852 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 11899 void __ovld vstore2(double2 data,
size_t offset,
double *p);
11900 void __ovld vstore3(double3 data,
size_t offset,
double *p);
11901 void __ovld vstore4(double4 data,
size_t offset,
double *p);
11902 void __ovld vstore8(double8 data,
size_t offset,
double *p);
11904 #endif //cl_khr_fp64 11906 void __ovld vstore(half data,
size_t offset, half *p);
// --- __global address space: char/short/int/long/float, widths 2/3/4/8/16.
11912 #endif //cl_khr_fp16 11914 void __ovld vstore2(char2 data,
size_t offset, __global
char *p);
11916 void __ovld vstore2(short2 data,
size_t offset, __global
short *p);
11918 void __ovld vstore2(int2 data,
size_t offset, __global
int *p);
11920 void __ovld vstore2(long2 data,
size_t offset, __global
long *p);
11922 void __ovld vstore2(float2 data,
size_t offset, __global
float *p);
11923 void __ovld vstore3(char3 data,
size_t offset, __global
char *p);
11925 void __ovld vstore3(short3 data,
size_t offset, __global
short *p);
11927 void __ovld vstore3(int3 data,
size_t offset, __global
int *p);
11929 void __ovld vstore3(long3 data,
size_t offset, __global
long *p);
11931 void __ovld vstore3(float3 data,
size_t offset, __global
float *p);
11932 void __ovld vstore4(char4 data,
size_t offset, __global
char *p);
11934 void __ovld vstore4(short4 data,
size_t offset, __global
short *p);
11936 void __ovld vstore4(int4 data,
size_t offset, __global
int *p);
11938 void __ovld vstore4(long4 data,
size_t offset, __global
long *p);
11940 void __ovld vstore4(float4 data,
size_t offset, __global
float *p);
11941 void __ovld vstore8(char8 data,
size_t offset, __global
char *p);
11943 void __ovld vstore8(short8 data,
size_t offset, __global
short *p);
11945 void __ovld vstore8(int8 data,
size_t offset, __global
int *p);
11947 void __ovld vstore8(long8 data,
size_t offset, __global
long *p);
11949 void __ovld vstore8(float8 data,
size_t offset, __global
float *p);
11950 void __ovld vstore16(char16 data,
size_t offset, __global
char *p);
11952 void __ovld vstore16(short16 data,
size_t offset, __global
short *p);
11954 void __ovld vstore16(int16 data,
size_t offset, __global
int *p);
11956 void __ovld vstore16(long16 data,
size_t offset, __global
long *p);
11958 void __ovld vstore16(float16 data,
size_t offset, __global
float *p);
// --- __local address space.
11959 void __ovld vstore2(char2 data,
size_t offset, __local
char *p);
11961 void __ovld vstore2(short2 data,
size_t offset, __local
short *p);
11963 void __ovld vstore2(int2 data,
size_t offset, __local
int *p);
11965 void __ovld vstore2(long2 data,
size_t offset, __local
long *p);
11967 void __ovld vstore2(float2 data,
size_t offset, __local
float *p);
11968 void __ovld vstore3(char3 data,
size_t offset, __local
char *p);
11970 void __ovld vstore3(short3 data,
size_t offset, __local
short *p);
11972 void __ovld vstore3(int3 data,
size_t offset, __local
int *p);
11974 void __ovld vstore3(long3 data,
size_t offset, __local
long *p);
11976 void __ovld vstore3(float3 data,
size_t offset, __local
float *p);
11977 void __ovld vstore4(char4 data,
size_t offset, __local
char *p);
11979 void __ovld vstore4(short4 data,
size_t offset, __local
short *p);
11981 void __ovld vstore4(int4 data,
size_t offset, __local
int *p);
11983 void __ovld vstore4(long4 data,
size_t offset, __local
long *p);
11985 void __ovld vstore4(float4 data,
size_t offset, __local
float *p);
11986 void __ovld vstore8(char8 data,
size_t offset, __local
char *p);
11988 void __ovld vstore8(short8 data,
size_t offset, __local
short *p);
11990 void __ovld vstore8(int8 data,
size_t offset, __local
int *p);
11992 void __ovld vstore8(long8 data,
size_t offset, __local
long *p);
11994 void __ovld vstore8(float8 data,
size_t offset, __local
float *p);
11995 void __ovld vstore16(char16 data,
size_t offset, __local
char *p);
11997 void __ovld vstore16(short16 data,
size_t offset, __local
short *p);
11999 void __ovld vstore16(int16 data,
size_t offset, __local
int *p);
12001 void __ovld vstore16(long16 data,
size_t offset, __local
long *p);
12003 void __ovld vstore16(float16 data,
size_t offset, __local
float *p);
// --- __private address space.
12004 void __ovld vstore2(char2 data,
size_t offset, __private
char *p);
12006 void __ovld vstore2(short2 data,
size_t offset, __private
short *p);
12008 void __ovld vstore2(int2 data,
size_t offset, __private
int *p);
12010 void __ovld vstore2(long2 data,
size_t offset, __private
long *p);
12012 void __ovld vstore2(float2 data,
size_t offset, __private
float *p);
12013 void __ovld vstore3(char3 data,
size_t offset, __private
char *p);
12015 void __ovld vstore3(short3 data,
size_t offset, __private
short *p);
12017 void __ovld vstore3(int3 data,
size_t offset, __private
int *p);
12019 void __ovld vstore3(long3 data,
size_t offset, __private
long *p);
12021 void __ovld vstore3(float3 data,
size_t offset, __private
float *p);
12022 void __ovld vstore4(char4 data,
size_t offset, __private
char *p);
12024 void __ovld vstore4(short4 data,
size_t offset, __private
short *p);
12026 void __ovld vstore4(int4 data,
size_t offset, __private
int *p);
12028 void __ovld vstore4(long4 data,
size_t offset, __private
long *p);
12030 void __ovld vstore4(float4 data,
size_t offset, __private
float *p);
12031 void __ovld vstore8(char8 data,
size_t offset, __private
char *p);
12033 void __ovld vstore8(short8 data,
size_t offset, __private
short *p);
12035 void __ovld vstore8(int8 data,
size_t offset, __private
int *p);
12037 void __ovld vstore8(long8 data,
size_t offset, __private
long *p);
12039 void __ovld vstore8(float8 data,
size_t offset, __private
float *p);
12040 void __ovld vstore16(char16 data,
size_t offset, __private
char *p);
12042 void __ovld vstore16(short16 data,
size_t offset, __private
short *p);
12044 void __ovld vstore16(int16 data,
size_t offset, __private
int *p);
12046 void __ovld vstore16(long16 data,
size_t offset, __private
long *p);
12048 void __ovld vstore16(float16 data,
size_t offset, __private
float *p);
// --- double overloads (cl_khr_fp64 guard; matching #endif at 12065).
12050 void __ovld vstore2(double2 data,
size_t offset, __global
double *p);
12051 void __ovld vstore3(double3 data,
size_t offset, __global
double *p);
12052 void __ovld vstore4(double4 data,
size_t offset, __global
double *p);
12053 void __ovld vstore8(double8 data,
size_t offset, __global
double *p);
12054 void __ovld vstore16(double16 data,
size_t offset, __global
double *p);
12055 void __ovld vstore2(double2 data,
size_t offset, __local
double *p);
12056 void __ovld vstore3(double3 data,
size_t offset, __local
double *p);
12057 void __ovld vstore4(double4 data,
size_t offset, __local
double *p);
12058 void __ovld vstore8(double8 data,
size_t offset, __local
double *p);
12059 void __ovld vstore16(double16 data,
size_t offset, __local
double *p);
12060 void __ovld vstore2(double2 data,
size_t offset, __private
double *p);
12061 void __ovld vstore3(double3 data,
size_t offset, __private
double *p);
12062 void __ovld vstore4(double4 data,
size_t offset, __private
double *p);
12063 void __ovld vstore8(double8 data,
size_t offset, __private
double *p);
12064 void __ovld vstore16(double16 data,
size_t offset, __private
double *p);
// --- half overloads (cl_khr_fp16 guard; matching #endif at 12085).
12065 #endif //cl_khr_fp64 12067 void __ovld vstore(half data,
size_t offset, __global half *p);
12068 void __ovld vstore2(half2 data,
size_t offset, __global half *p);
12069 void __ovld vstore3(half3 data,
size_t offset, __global half *p);
12070 void __ovld vstore4(half4 data,
size_t offset, __global half *p);
12071 void __ovld vstore8(half8 data,
size_t offset, __global half *p);
12072 void __ovld vstore16(half16 data,
size_t offset, __global half *p);
12073 void __ovld vstore(half data,
size_t offset, __local half *p);
12074 void __ovld vstore2(half2 data,
size_t offset, __local half *p);
12075 void __ovld vstore3(half3 data,
size_t offset, __local half *p);
12076 void __ovld vstore4(half4 data,
size_t offset, __local half *p);
12077 void __ovld vstore8(half8 data,
size_t offset, __local half *p);
12078 void __ovld vstore16(half16 data,
size_t offset, __local half *p);
12079 void __ovld vstore(half data,
size_t offset, __private half *p);
12080 void __ovld vstore2(half2 data,
size_t offset, __private half *p);
12081 void __ovld vstore3(half3 data,
size_t offset, __private half *p);
12082 void __ovld vstore4(half4 data,
size_t offset, __private half *p);
12083 void __ovld vstore8(half8 data,
size_t offset, __private half *p);
12084 void __ovld vstore16(half16 data,
size_t offset, __private half *p);
/*
 * Memory-fence macros, a memory_scope enumerator fragment, and the
 * prefetch(const __global T*, size_t num_elements) overloads.
 *
 * NOTE(review): the first two fused lines below compress a large elided
 * span of the original header (lines 12085-13249): the CLK_*_MEM_FENCE
 * flag macros, and a single enumerator (memory_scope_sub_group) whose
 * enclosing enum's braces were elided by the extraction. The fused-in
 * original line numbers mark the elisions; nothing here is freestanding
 * broken code in the real header.
 */
12085 #endif //cl_khr_fp16 12086 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12097 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12103 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12118 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12140 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12153 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12165 #endif //cl_khr_fp64 12198 #endif //cl_khr_fp64 12199 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12212 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12264 #endif //cl_khr_fp64 12417 #endif //cl_khr_fp64 12418 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12439 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12465 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12483 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12554 #endif //cl_khr_fp64 12767 #endif //cl_khr_fp64 12768 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12779 #define CLK_LOCAL_MEM_FENCE 0x01 12785 #define CLK_GLOBAL_MEM_FENCE 0x02 12787 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12793 #define CLK_IMAGE_MEM_FENCE 0x04 12794 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12828 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12835 #if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) 12836 memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
// prefetch: __global-only overloads taking (pointer, element count).
// Scalar element types first, then vector widths 2/3/4/8/16; note the
// unsigned vector types (ucharN etc.) ARE present in this section.
12842 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 12887 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 12898 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 13050 #endif //cl_khr_fp64 13064 #endif //cl_khr_fp16 13213 #endif //cl_khr_fp64 13227 #endif //cl_khr_fp16 13249 void __ovld prefetch(
const __global
char *p,
size_t num_elements);
13251 void __ovld prefetch(
const __global
short *p,
size_t num_elements);
13253 void __ovld prefetch(
const __global
int *p,
size_t num_elements);
13255 void __ovld prefetch(
const __global
long *p,
size_t num_elements);
13257 void __ovld prefetch(
const __global
float *p,
size_t num_elements);
13258 void __ovld prefetch(
const __global char2 *p,
size_t num_elements);
13259 void __ovld prefetch(
const __global uchar2 *p,
size_t num_elements);
13260 void __ovld prefetch(
const __global short2 *p,
size_t num_elements);
13261 void __ovld prefetch(
const __global ushort2 *p,
size_t num_elements);
13262 void __ovld prefetch(
const __global int2 *p,
size_t num_elements);
13263 void __ovld prefetch(
const __global uint2 *p,
size_t num_elements);
13264 void __ovld prefetch(
const __global long2 *p,
size_t num_elements);
13265 void __ovld prefetch(
const __global ulong2 *p,
size_t num_elements);
13266 void __ovld prefetch(
const __global float2 *p,
size_t num_elements);
13267 void __ovld prefetch(
const __global char3 *p,
size_t num_elements);
13268 void __ovld prefetch(
const __global uchar3 *p,
size_t num_elements);
13269 void __ovld prefetch(
const __global short3 *p,
size_t num_elements);
13270 void __ovld prefetch(
const __global ushort3 *p,
size_t num_elements);
13271 void __ovld prefetch(
const __global int3 *p,
size_t num_elements);
13272 void __ovld prefetch(
const __global uint3 *p,
size_t num_elements);
13273 void __ovld prefetch(
const __global long3 *p,
size_t num_elements);
13274 void __ovld prefetch(
const __global ulong3 *p,
size_t num_elements);
13275 void __ovld prefetch(
const __global float3 *p,
size_t num_elements);
13276 void __ovld prefetch(
const __global char4 *p,
size_t num_elements);
13277 void __ovld prefetch(
const __global uchar4 *p,
size_t num_elements);
13278 void __ovld prefetch(
const __global short4 *p,
size_t num_elements);
13279 void __ovld prefetch(
const __global ushort4 *p,
size_t num_elements);
13280 void __ovld prefetch(
const __global int4 *p,
size_t num_elements);
13281 void __ovld prefetch(
const __global uint4 *p,
size_t num_elements);
13282 void __ovld prefetch(
const __global long4 *p,
size_t num_elements);
13283 void __ovld prefetch(
const __global ulong4 *p,
size_t num_elements);
13284 void __ovld prefetch(
const __global float4 *p,
size_t num_elements);
13285 void __ovld prefetch(
const __global char8 *p,
size_t num_elements);
13286 void __ovld prefetch(
const __global uchar8 *p,
size_t num_elements);
13287 void __ovld prefetch(
const __global short8 *p,
size_t num_elements);
13288 void __ovld prefetch(
const __global ushort8 *p,
size_t num_elements);
13289 void __ovld prefetch(
const __global int8 *p,
size_t num_elements);
13290 void __ovld prefetch(
const __global uint8 *p,
size_t num_elements);
13291 void __ovld prefetch(
const __global long8 *p,
size_t num_elements);
13292 void __ovld prefetch(
const __global ulong8 *p,
size_t num_elements);
13293 void __ovld prefetch(
const __global float8 *p,
size_t num_elements);
13294 void __ovld prefetch(
const __global char16 *p,
size_t num_elements);
13295 void __ovld prefetch(
const __global uchar16 *p,
size_t num_elements);
13296 void __ovld prefetch(
const __global short16 *p,
size_t num_elements);
13297 void __ovld prefetch(
const __global ushort16 *p,
size_t num_elements);
13298 void __ovld prefetch(
const __global int16 *p,
size_t num_elements);
13299 void __ovld prefetch(
const __global uint16 *p,
size_t num_elements);
13300 void __ovld prefetch(
const __global long16 *p,
size_t num_elements);
13301 void __ovld prefetch(
const __global ulong16 *p,
size_t num_elements);
13302 void __ovld prefetch(
const __global float16 *p,
size_t num_elements);
// double prefetch overloads (cl_khr_fp64 guard; #endif at 13310).
13304 void __ovld prefetch(
const __global
double *p,
size_t num_elements);
13305 void __ovld prefetch(
const __global double2 *p,
size_t num_elements);
13306 void __ovld prefetch(
const __global double3 *p,
size_t num_elements);
13307 void __ovld prefetch(
const __global double4 *p,
size_t num_elements);
13308 void __ovld prefetch(
const __global double8 *p,
size_t num_elements);
13309 void __ovld prefetch(
const __global double16 *p,
size_t num_elements);
// half prefetch overloads (cl_khr_fp16 guard; #endif at 13318).
13310 #endif //cl_khr_fp64 13312 void __ovld prefetch(
const __global half *p,
size_t num_elements);
13313 void __ovld prefetch(
const __global half2 *p,
size_t num_elements);
13314 void __ovld prefetch(
const __global half3 *p,
size_t num_elements);
13315 void __ovld prefetch(
const __global half4 *p,
size_t num_elements);
13316 void __ovld prefetch(
const __global half8 *p,
size_t num_elements);
13317 void __ovld prefetch(
const __global half16 *p,
size_t num_elements);
/*
 * Legacy (pre-OpenCL-2.0) atomic builtin declarations.
 *
 * Pattern per operation: an unqualified-extension atomic_OP() pair for
 * __global/__local unsigned int, then extension-gated atom_OP()
 * overloads: 32-bit forms under cl_khr_{global,local}_int32_
 * {base,extended}_atomics and 64-bit forms under
 * cl_khr_int64_{base,extended}_atomics.
 *
 * NOTE(review): this dump fuses original header lines together and
 * elides many of them (the embedded numbers mark the gaps) — in
 * particular most of the matching #endif lines for the #if defined(...)
 * guards, and the signed-int atomic_* overloads, fall in elided ranges.
 * The apparent conditional imbalance is an artifact of the extraction,
 * not of the real header.
 */
13318 #endif // cl_khr_fp16 13322 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13323 #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable 13324 #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable 13333 unsigned int __ovld atomic_add(
volatile __global
unsigned int *p,
unsigned int val);
13335 unsigned int __ovld atomic_add(
volatile __local
unsigned int *p,
unsigned int val);
13337 #if defined(cl_khr_global_int32_base_atomics) 13338 int __ovld atom_add(
volatile __global
int *p,
int val);
13339 unsigned int __ovld atom_add(
volatile __global
unsigned int *p,
unsigned int val);
13341 #if defined(cl_khr_local_int32_base_atomics) 13342 int __ovld atom_add(
volatile __local
int *p,
int val);
13343 unsigned int __ovld atom_add(
volatile __local
unsigned int *p,
unsigned int val);
13346 #if defined(cl_khr_int64_base_atomics) 13347 long __ovld atom_add(
volatile __global
long *p,
long val);
13348 unsigned long __ovld atom_add(
volatile __global
unsigned long *p,
unsigned long val);
13349 long __ovld atom_add(
volatile __local
long *p,
long val);
13350 unsigned long __ovld atom_add(
volatile __local
unsigned long *p,
unsigned long val);
// --- sub ---
13359 unsigned int __ovld atomic_sub(
volatile __global
unsigned int *p,
unsigned int val);
13361 unsigned int __ovld atomic_sub(
volatile __local
unsigned int *p,
unsigned int val);
13363 #if defined(cl_khr_global_int32_base_atomics) 13364 int __ovld atom_sub(
volatile __global
int *p,
int val);
13365 unsigned int __ovld atom_sub(
volatile __global
unsigned int *p,
unsigned int val);
13367 #if defined(cl_khr_local_int32_base_atomics) 13368 int __ovld atom_sub(
volatile __local
int *p,
int val);
13369 unsigned int __ovld atom_sub(
volatile __local
unsigned int *p,
unsigned int val);
13372 #if defined(cl_khr_int64_base_atomics) 13373 long __ovld atom_sub(
volatile __global
long *p,
long val);
13374 unsigned long __ovld atom_sub(
volatile __global
unsigned long *p,
unsigned long val);
13375 long __ovld atom_sub(
volatile __local
long *p,
long val);
13376 unsigned long __ovld atom_sub(
volatile __local
unsigned long *p,
unsigned long val);
// --- xchg ---
13385 unsigned int __ovld atomic_xchg(
volatile __global
unsigned int *p,
unsigned int val);
13387 unsigned int __ovld atomic_xchg(
volatile __local
unsigned int *p,
unsigned int val);
13391 #if defined(cl_khr_global_int32_base_atomics) 13392 int __ovld atom_xchg(
volatile __global
int *p,
int val);
13393 unsigned int __ovld atom_xchg(
volatile __global
unsigned int *p,
unsigned int val);
13395 #if defined(cl_khr_local_int32_base_atomics) 13396 int __ovld atom_xchg(
volatile __local
int *p,
int val);
13397 unsigned int __ovld atom_xchg(
volatile __local
unsigned int *p,
unsigned int val);
13400 #if defined(cl_khr_int64_base_atomics) 13401 long __ovld atom_xchg(
volatile __global
long *p,
long val);
13402 long __ovld atom_xchg(
volatile __local
long *p,
long val);
13403 unsigned long __ovld atom_xchg(
volatile __global
unsigned long *p,
unsigned long val);
13404 unsigned long __ovld atom_xchg(
volatile __local
unsigned long *p,
unsigned long val);
// --- inc (single-argument: no val operand) ---
13418 #if defined(cl_khr_global_int32_base_atomics) 13419 int __ovld atom_inc(
volatile __global
int *p);
13420 unsigned int __ovld atom_inc(
volatile __global
unsigned int *p);
13422 #if defined(cl_khr_local_int32_base_atomics) 13423 int __ovld atom_inc(
volatile __local
int *p);
13424 unsigned int __ovld atom_inc(
volatile __local
unsigned int *p);
13427 #if defined(cl_khr_int64_base_atomics) 13428 long __ovld atom_inc(
volatile __global
long *p);
13429 unsigned long __ovld atom_inc(
volatile __global
unsigned long *p);
13430 long __ovld atom_inc(
volatile __local
long *p);
13431 unsigned long __ovld atom_inc(
volatile __local
unsigned long *p);
// --- dec ---
13445 #if defined(cl_khr_global_int32_base_atomics) 13446 int __ovld atom_dec(
volatile __global
int *p);
13447 unsigned int __ovld atom_dec(
volatile __global
unsigned int *p);
13449 #if defined(cl_khr_local_int32_base_atomics) 13450 int __ovld atom_dec(
volatile __local
int *p);
13451 unsigned int __ovld atom_dec(
volatile __local
unsigned int *p);
13454 #if defined(cl_khr_int64_base_atomics) 13455 long __ovld atom_dec(
volatile __global
long *p);
13456 unsigned long __ovld atom_dec(
volatile __global
unsigned long *p);
13457 long __ovld atom_dec(
volatile __local
long *p);
13458 unsigned long __ovld atom_dec(
volatile __local
unsigned long *p);
// --- cmpxchg (three-argument: pointer, cmp, val) ---
13469 unsigned int __ovld atomic_cmpxchg(
volatile __global
unsigned int *p,
unsigned int cmp,
unsigned int val);
13471 unsigned int __ovld atomic_cmpxchg(
volatile __local
unsigned int *p,
unsigned int cmp,
unsigned int val);
13473 #if defined(cl_khr_global_int32_base_atomics) 13474 int __ovld atom_cmpxchg(
volatile __global
int *p,
int cmp,
int val);
13475 unsigned int __ovld atom_cmpxchg(
volatile __global
unsigned int *p,
unsigned int cmp,
unsigned int val);
13477 #if defined(cl_khr_local_int32_base_atomics) 13478 int __ovld atom_cmpxchg(
volatile __local
int *p,
int cmp,
int val);
13479 unsigned int __ovld atom_cmpxchg(
volatile __local
unsigned int *p,
unsigned int cmp,
unsigned int val);
13482 #if defined(cl_khr_int64_base_atomics) 13483 long __ovld atom_cmpxchg(
volatile __global
long *p,
long cmp,
long val);
13484 unsigned long __ovld atom_cmpxchg(
volatile __global
unsigned long *p,
unsigned long cmp,
unsigned long val);
13485 long __ovld atom_cmpxchg(
volatile __local
long *p,
long cmp,
long val);
13486 unsigned long __ovld atom_cmpxchg(
volatile __local
unsigned long *p,
unsigned long cmp,
unsigned long val);
// --- min (extended-atomics extensions from here on) ---
13497 unsigned int __ovld atomic_min(
volatile __global
unsigned int *p,
unsigned int val);
13499 unsigned int __ovld atomic_min(
volatile __local
unsigned int *p,
unsigned int val);
13501 #if defined(cl_khr_global_int32_extended_atomics) 13502 int __ovld atom_min(
volatile __global
int *p,
int val);
13503 unsigned int __ovld atom_min(
volatile __global
unsigned int *p,
unsigned int val);
13505 #if defined(cl_khr_local_int32_extended_atomics) 13506 int __ovld atom_min(
volatile __local
int *p,
int val);
13507 unsigned int __ovld atom_min(
volatile __local
unsigned int *p,
unsigned int val);
13510 #if defined(cl_khr_int64_extended_atomics) 13511 long __ovld atom_min(
volatile __global
long *p,
long val);
13512 unsigned long __ovld atom_min(
volatile __global
unsigned long *p,
unsigned long val);
13513 long __ovld atom_min(
volatile __local
long *p,
long val);
13514 unsigned long __ovld atom_min(
volatile __local
unsigned long *p,
unsigned long val);
// --- max ---
13525 unsigned int __ovld atomic_max(
volatile __global
unsigned int *p,
unsigned int val);
13527 unsigned int __ovld atomic_max(
volatile __local
unsigned int *p,
unsigned int val);
13529 #if defined(cl_khr_global_int32_extended_atomics) 13530 int __ovld atom_max(
volatile __global
int *p,
int val);
13531 unsigned int __ovld atom_max(
volatile __global
unsigned int *p,
unsigned int val);
13533 #if defined(cl_khr_local_int32_extended_atomics) 13534 int __ovld atom_max(
volatile __local
int *p,
int val);
13535 unsigned int __ovld atom_max(
volatile __local
unsigned int *p,
unsigned int val);
13538 #if defined(cl_khr_int64_extended_atomics) 13539 long __ovld atom_max(
volatile __global
long *p,
long val);
13540 unsigned long __ovld atom_max(
volatile __global
unsigned long *p,
unsigned long val);
13541 long __ovld atom_max(
volatile __local
long *p,
long val);
13542 unsigned long __ovld atom_max(
volatile __local
unsigned long *p,
unsigned long val);
// --- and ---
13552 unsigned int __ovld atomic_and(
volatile __global
unsigned int *p,
unsigned int val);
13554 unsigned int __ovld atomic_and(
volatile __local
unsigned int *p,
unsigned int val);
13556 #if defined(cl_khr_global_int32_extended_atomics) 13557 int __ovld atom_and(
volatile __global
int *p,
int val);
13558 unsigned int __ovld atom_and(
volatile __global
unsigned int *p,
unsigned int val);
13560 #if defined(cl_khr_local_int32_extended_atomics) 13561 int __ovld atom_and(
volatile __local
int *p,
int val);
13562 unsigned int __ovld atom_and(
volatile __local
unsigned int *p,
unsigned int val);
13565 #if defined(cl_khr_int64_extended_atomics) 13566 long __ovld atom_and(
volatile __global
long *p,
long val);
13567 unsigned long __ovld atom_and(
volatile __global
unsigned long *p,
unsigned long val);
13568 long __ovld atom_and(
volatile __local
long *p,
long val);
13569 unsigned long __ovld atom_and(
volatile __local
unsigned long *p,
unsigned long val);
// --- or ---
13579 unsigned int __ovld atomic_or(
volatile __global
unsigned int *p,
unsigned int val);
13581 unsigned int __ovld atomic_or(
volatile __local
unsigned int *p,
unsigned int val);
13583 #if defined(cl_khr_global_int32_extended_atomics) 13584 int __ovld atom_or(
volatile __global
int *p,
int val);
13585 unsigned int __ovld atom_or(
volatile __global
unsigned int *p,
unsigned int val);
13587 #if defined(cl_khr_local_int32_extended_atomics) 13588 int __ovld atom_or(
volatile __local
int *p,
int val);
13589 unsigned int __ovld atom_or(
volatile __local
unsigned int *p,
unsigned int val);
13592 #if defined(cl_khr_int64_extended_atomics) 13593 long __ovld atom_or(
volatile __global
long *p,
long val);
13594 unsigned long __ovld atom_or(
volatile __global
unsigned long *p,
unsigned long val);
13595 long __ovld atom_or(
volatile __local
long *p,
long val);
13596 unsigned long __ovld atom_or(
volatile __local
unsigned long *p,
unsigned long val);
// --- xor ---
13606 unsigned int __ovld atomic_xor(
volatile __global
unsigned int *p,
unsigned int val);
13608 unsigned int __ovld atomic_xor(
volatile __local
unsigned int *p,
unsigned int val);
13610 #if defined(cl_khr_global_int32_extended_atomics) 13611 int __ovld atom_xor(
volatile __global
int *p,
int val);
13612 unsigned int __ovld atom_xor(
volatile __global
unsigned int *p,
unsigned int val);
13614 #if defined(cl_khr_local_int32_extended_atomics) 13615 int __ovld atom_xor(
volatile __local
int *p,
int val);
13616 unsigned int __ovld atom_xor(
volatile __local
unsigned int *p,
unsigned int val);
13619 #if defined(cl_khr_int64_extended_atomics) 13620 long __ovld atom_xor(
volatile __global
long *p,
long val);
13621 unsigned long __ovld atom_xor(
volatile __global
unsigned long *p,
unsigned long val);
13622 long __ovld atom_xor(
volatile __local
long *p,
long val);
13623 unsigned long __ovld atom_xor(
volatile __local
unsigned long *p,
unsigned long val);
// The int64 atomics pragmas enabled above are disabled again here; the
// ATOMIC_VAR_INIT/ATOMIC_FLAG_INIT macros belong to the OpenCL 2.0
// C11-style atomics section that follows (largely elided in this dump).
13626 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13627 #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable 13628 #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable 13633 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 13634 #ifndef ATOMIC_VAR_INIT 13635 #define ATOMIC_VAR_INIT(x) (x) 13636 #endif //ATOMIC_VAR_INIT 13637 #define ATOMIC_FLAG_INIT 0 13650 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13651 #pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable 13652 #pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable 13659 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13664 #endif //cl_khr_fp64 13721 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13770 #endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13776 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13828 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13833 #endif //cl_khr_fp64 13853 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13858 #endif //cl_khr_fp64 13878 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13883 #endif //cl_khr_fp64 13896 int desired, memory_order success, memory_order failure);
13898 int desired, memory_order success, memory_order failure, memory_scope scope);
13901 uint desired, memory_order success, memory_order failure);
13903 uint desired, memory_order success, memory_order failure, memory_scope scope);
13906 int desired, memory_order success, memory_order failure);
13908 int desired, memory_order success, memory_order failure, memory_scope scope);
13911 uint desired, memory_order success, memory_order failure);
13913 uint desired, memory_order success, memory_order failure, memory_scope scope);
13916 float desired, memory_order success, memory_order failure);
13918 float desired, memory_order success, memory_order failure, memory_scope scope);
13921 float desired, memory_order success, memory_order failure);
13923 float desired, memory_order success, memory_order failure, memory_scope scope);
13924 #if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics) 13928 double desired, memory_order success, memory_order failure);
13930 double desired, memory_order success, memory_order failure, memory_scope scope);
13933 double desired, memory_order success, memory_order failure);
13935 double desired, memory_order success, memory_order failure, memory_scope scope);
13936 #endif //cl_khr_fp64 13939 long desired, memory_order success, memory_order failure);
13941 long desired, memory_order success, memory_order failure, memory_scope scope);
13944 long desired, memory_order success, memory_order failure);
13946 long desired, memory_order success, memory_order failure, memory_scope scope);
13949 ulong desired, memory_order success, memory_order failure);
13951 ulong desired, memory_order success, memory_order failure, memory_scope scope);
13954 ulong desired, memory_order success, memory_order failure);
13956 ulong desired, memory_order success, memory_order failure, memory_scope scope);
13968 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 14214 #endif //cl_khr_fp64 14236 #endif //cl_khr_fp16 14438 #endif //cl_khr_fp64 14460 #endif //cl_khr_fp16 14462 #if __OPENCL_C_VERSION__ >= CL_VERSION_1_2 14465 int printf(__constant
const char* st, ...);
14474 #define CLK_ADDRESS_NONE 0 14475 #define CLK_ADDRESS_CLAMP_TO_EDGE 2 14476 #define CLK_ADDRESS_CLAMP 4 14477 #define CLK_ADDRESS_REPEAT 6 14478 #define CLK_ADDRESS_MIRRORED_REPEAT 8 14483 #define CLK_NORMALIZED_COORDS_FALSE 0 14484 #define CLK_NORMALIZED_COORDS_TRUE 1 14489 #define CLK_FILTER_NEAREST 0x10 14490 #define CLK_FILTER_LINEAR 0x20 14492 #ifdef cl_khr_gl_msaa_sharing 14493 #pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable 14494 #endif //cl_khr_gl_msaa_sharing 14629 #ifdef cl_khr_depth_images 14635 #endif //cl_khr_depth_images 14637 #if defined(cl_khr_gl_msaa_sharing) 14649 #endif //cl_khr_gl_msaa_sharing 14652 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 14653 #ifdef cl_khr_mipmap_image 14679 float4
__purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler,
float coord,
float gradientX,
float gradientY);
14680 int4
__purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler,
float coord,
float gradientX,
float gradientY);
14681 uint4
__purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler,
float coord,
float gradientX,
float gradientY);
14683 float4
__purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord,
float gradientX,
float gradientY);
14684 int4
__purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord,
float gradientX,
float gradientY);
14685 uint4
__purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord,
float gradientX,
float gradientY);
14687 float4
__purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14688 int4
__purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14689 uint4
__purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14691 float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14693 float4
__purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14694 int4
__purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14695 uint4
__purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14697 float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14699 float4
__purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14700 int4
__purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14701 uint4
__purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14727 #endif //cl_khr_mipmap_image 14728 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 14754 #ifdef cl_khr_depth_images 14757 #endif //cl_khr_depth_images 14765 half4
// --- half-precision image reads (cl_khr_fp16) -------------------------------
// All overloads return half4 (the return type appears on the preceding
// fused line for each declaration -- extraction artifact). First group:
// sampler-based reads, one int-coord and one float-coord overload per
// image dimensionality (1D, 1D array, 2D, 3D, 2D array).
__purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler,
int coord);
14766 half4
__purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler,
float coord);
14767 half4
__purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
14768 half4
__purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
14769 half4
__purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
14770 half4
__purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
14771 half4
__purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
14772 half4
__purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
14773 half4
__purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
14774 half4
// Second group: sampler-less reads (integer coordinates only), including
// the image1d_buffer_t overload that has no sampler-based counterpart.
__purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
14775 half4
__purefn __ovld read_imageh(read_only image1d_t image,
int coord);
14776 half4
__purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
14777 half4
__purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
14778 half4
__purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
14779 half4
__purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
14780 half4
__purefn __ovld read_imageh(read_only image1d_buffer_t image,
int coord);
14781 #endif //cl_khr_fp16 14784 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 14809 #ifdef cl_khr_depth_images 14812 #endif //cl_khr_depth_images 14814 #if cl_khr_gl_msaa_sharing 14825 #endif //cl_khr_gl_msaa_sharing 14827 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 14828 #ifdef cl_khr_mipmap_image 14833 float4
__purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord,
float lod);
14843 float4
__purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord,
float lod);
14853 float4
__purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler,
float coord,
float gradientX,
float gradientY);
14854 int4
__purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler,
float coord,
float gradientX,
float gradientY);
14855 uint4
__purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler,
float coord,
float gradientX,
float gradientY);
14857 float4
__purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord,
float gradientX,
float gradientY);
14858 int4
__purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord,
float gradientX,
float gradientY);
14859 uint4
__purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord,
float gradientX,
float gradientY);
14861 float4
__purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14862 int4
__purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14863 uint4
__purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14865 float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
14867 float4
__purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14868 int4
__purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14869 uint4
__purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14871 float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
14873 float4
__purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14874 int4
__purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14875 uint4
__purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
14881 float4
__purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord,
float lod);
14891 float4
__purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord,
float lod);
14900 #endif //cl_khr_mipmap_image 14901 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 14905 half4
__purefn __ovld read_imageh(read_write image1d_t image,
int coord);
14906 half4
__purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
14907 half4
__purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
14908 half4
__purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
14909 half4
__purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
14910 half4
__purefn __ovld read_imageh(read_write image1d_buffer_t image,
int coord);
14911 #endif //cl_khr_fp16 14912 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 14985 void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
14986 void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
14987 void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
14993 void __ovld write_imagef(write_only image1d_buffer_t image,
int coord, float4 color);
14997 void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
14998 void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
14999 void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
15001 #ifdef cl_khr_3d_image_writes 15007 #ifdef cl_khr_depth_images 15008 void __ovld write_imagef(write_only image2d_depth_t image, int2 coord,
float color);
15009 void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord,
float color);
15010 #endif //cl_khr_depth_images 15013 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15014 #ifdef cl_khr_mipmap_image 15015 void __ovld write_imagef(write_only image1d_t image,
int coord,
int lod, float4 color);
15016 void __ovld write_imagei(write_only image1d_t image,
int coord,
int lod, int4 color);
15019 void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord,
int lod, float4 color);
15020 void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord,
int lod, int4 color);
15021 void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord,
int lod, uint4 color);
15023 void __ovld write_imagef(write_only image2d_t image, int2 coord,
int lod, float4 color);
15024 void __ovld write_imagei(write_only image2d_t image, int2 coord,
int lod, int4 color);
15027 void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord,
int lod, float4 color);
15028 void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord,
int lod, int4 color);
15029 void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord,
int lod, uint4 color);
15031 void __ovld write_imagef(write_only image2d_depth_t image, int2 coord,
int lod,
float color);
15032 void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord,
int lod,
float color);
15034 #ifdef cl_khr_3d_image_writes 15035 void __ovld write_imagef(write_only image3d_t image, int4 coord,
int lod, float4 color);
15036 void __ovld write_imagei(write_only image3d_t image, int4 coord,
int lod, int4 color);
15039 #endif //cl_khr_mipmap_image 15040 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15044 void __ovld write_imageh(write_only image1d_t image,
int coord, half4 color);
15045 void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
15046 #ifdef cl_khr_3d_image_writes 15047 void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
15049 void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
15050 void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
15051 void __ovld write_imageh(write_only image1d_buffer_t image,
int coord, half4 color);
15052 #endif //cl_khr_fp16 15055 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15060 void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
15061 void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
15062 void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
15068 void __ovld write_imagef(read_write image1d_buffer_t image,
int coord, float4 color);
15072 void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
15073 void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
15074 void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
15076 #ifdef cl_khr_3d_image_writes 15082 #ifdef cl_khr_depth_images 15083 void __ovld write_imagef(read_write image2d_depth_t image, int2 coord,
float color);
15084 void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord,
float color);
15085 #endif //cl_khr_depth_images 15087 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15088 #ifdef cl_khr_mipmap_image 15089 void __ovld write_imagef(read_write image1d_t image,
int coord,
int lod, float4 color);
15090 void __ovld write_imagei(read_write image1d_t image,
int coord,
int lod, int4 color);
15093 void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord,
int lod, float4 color);
15094 void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord,
int lod, int4 color);
15095 void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord,
int lod, uint4 color);
15097 void __ovld write_imagef(read_write image2d_t image, int2 coord,
int lod, float4 color);
15098 void __ovld write_imagei(read_write image2d_t image, int2 coord,
int lod, int4 color);
15101 void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord,
int lod, float4 color);
15102 void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord,
int lod, int4 color);
15103 void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord,
int lod, uint4 color);
15105 void __ovld write_imagef(read_write image2d_depth_t image, int2 coord,
int lod,
float color);
15106 void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord,
int lod,
float color);
15108 #ifdef cl_khr_3d_image_writes 15109 void __ovld write_imagef(read_write image3d_t image, int4 coord,
int lod, float4 color);
15110 void __ovld write_imagei(read_write image3d_t image, int4 coord,
int lod, int4 color);
15113 #endif //cl_khr_mipmap_image 15114 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15118 void __ovld write_imageh(read_write image1d_t image,
int coord, half4 color);
15119 void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
15120 #ifdef cl_khr_3d_image_writes 15121 void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
15123 void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
15124 void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
15125 void __ovld write_imageh(read_write image1d_buffer_t image,
int coord, half4 color);
15126 #endif //cl_khr_fp16 15127 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15140 #ifdef cl_khr_3d_image_writes 15145 #ifdef cl_khr_depth_images 15148 #endif //cl_khr_depth_images 15149 #if defined(cl_khr_gl_msaa_sharing) 15154 #endif //cl_khr_gl_msaa_sharing 15159 #ifdef cl_khr_3d_image_writes 15164 #ifdef cl_khr_depth_images 15167 #endif //cl_khr_depth_images 15168 #if defined(cl_khr_gl_msaa_sharing) 15173 #endif //cl_khr_gl_msaa_sharing 15175 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15182 #ifdef cl_khr_depth_images 15185 #endif //cl_khr_depth_images 15186 #if defined(cl_khr_gl_msaa_sharing) 15191 #endif //cl_khr_gl_msaa_sharing 15192 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15200 #ifdef cl_khr_depth_images 15203 #endif //cl_khr_depth_images 15204 #if defined(cl_khr_gl_msaa_sharing) 15209 #endif //cl_khr_gl_msaa_sharing 15212 #ifdef cl_khr_3d_image_writes 15216 #ifdef cl_khr_depth_images 15219 #endif //cl_khr_depth_images 15220 #if defined(cl_khr_gl_msaa_sharing) 15225 #endif //cl_khr_gl_msaa_sharing 15227 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15231 #ifdef cl_khr_depth_images 15234 #endif //cl_khr_depth_images 15235 #if defined(cl_khr_gl_msaa_sharing) 15240 #endif //cl_khr_gl_msaa_sharing 15241 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15248 #ifdef cl_khr_3d_image_writes 15252 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15254 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15257 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15258 #ifdef cl_khr_mipmap_image 15263 int __ovld get_image_num_mip_levels(read_only image1d_t image);
15264 int __ovld get_image_num_mip_levels(read_only image2d_t image);
15265 int __ovld get_image_num_mip_levels(read_only image3d_t image);
15267 int __ovld get_image_num_mip_levels(write_only image1d_t image);
15268 int __ovld get_image_num_mip_levels(write_only image2d_t image);
15269 #ifdef cl_khr_3d_image_writes 15270 int __ovld get_image_num_mip_levels(write_only image3d_t image);
15273 int __ovld get_image_num_mip_levels(read_write image1d_t image);
15274 int __ovld get_image_num_mip_levels(read_write image2d_t image);
15275 int __ovld get_image_num_mip_levels(read_write image3d_t image);
15277 int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
15278 int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
15279 int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
15280 int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
15282 int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
15283 int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
15284 int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
15285 int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
15287 int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
15288 int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
15289 int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
15290 int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
15292 #endif //cl_khr_mipmap_image 15293 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15317 #define CLK_SNORM_INT8 0x10D0 15318 #define CLK_SNORM_INT16 0x10D1 15319 #define CLK_UNORM_INT8 0x10D2 15320 #define CLK_UNORM_INT16 0x10D3 15321 #define CLK_UNORM_SHORT_565 0x10D4 15322 #define CLK_UNORM_SHORT_555 0x10D5 15323 #define CLK_UNORM_INT_101010 0x10D6 15324 #define CLK_SIGNED_INT8 0x10D7 15325 #define CLK_SIGNED_INT16 0x10D8 15326 #define CLK_SIGNED_INT32 0x10D9 15327 #define CLK_UNSIGNED_INT8 0x10DA 15328 #define CLK_UNSIGNED_INT16 0x10DB 15329 #define CLK_UNSIGNED_INT32 0x10DC 15330 #define CLK_HALF_FLOAT 0x10DD 15331 #define CLK_FLOAT 0x10DE 15332 #define CLK_UNORM_INT24 0x10DF 15340 #ifdef cl_khr_depth_images 15343 #endif //cl_khr_depth_images 15344 #if defined(cl_khr_gl_msaa_sharing) 15349 #endif //cl_khr_gl_msaa_sharing 15354 #ifdef cl_khr_3d_image_writes 15359 #ifdef cl_khr_depth_images 15362 #endif //cl_khr_depth_images 15363 #if defined(cl_khr_gl_msaa_sharing) 15368 #endif //cl_khr_gl_msaa_sharing 15370 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15377 #ifdef cl_khr_depth_images 15380 #endif //cl_khr_depth_images 15381 #if defined(cl_khr_gl_msaa_sharing) 15386 #endif //cl_khr_gl_msaa_sharing 15387 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15407 #define CLK_R 0x10B0 15408 #define CLK_A 0x10B1 15409 #define CLK_RG 0x10B2 15410 #define CLK_RA 0x10B3 15411 #define CLK_RGB 0x10B4 15412 #define CLK_RGBA 0x10B5 15413 #define CLK_BGRA 0x10B6 15414 #define CLK_ARGB 0x10B7 15415 #define CLK_INTENSITY 0x10B8 15416 #define CLK_LUMINANCE 0x10B9 15417 #define CLK_Rx 0x10BA 15418 #define CLK_RGx 0x10BB 15419 #define CLK_RGBx 0x10BC 15420 #define CLK_DEPTH 0x10BD 15421 #define CLK_DEPTH_STENCIL 0x10BE 15422 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15423 #define CLK_sRGB 0x10BF 15424 #define CLK_sRGBA 0x10C1 15425 #define CLK_sRGBx 0x10C0 15426 #define CLK_sBGRA 0x10C2 15427 #define CLK_ABGR 0x10C3 15428 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15436 
#ifdef cl_khr_depth_images 15439 #endif //cl_khr_depth_images 15440 #if defined(cl_khr_gl_msaa_sharing) 15445 #endif //cl_khr_gl_msaa_sharing 15450 #ifdef cl_khr_3d_image_writes 15455 #ifdef cl_khr_depth_images 15458 #endif //cl_khr_depth_images 15459 #if defined(cl_khr_gl_msaa_sharing) 15464 #endif //cl_khr_gl_msaa_sharing 15466 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15473 #ifdef cl_khr_depth_images 15476 #endif //cl_khr_depth_images 15477 #if defined(cl_khr_gl_msaa_sharing) 15482 #endif //cl_khr_gl_msaa_sharing 15483 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15492 #ifdef cl_khr_depth_images 15495 #endif //cl_khr_depth_images 15496 #if defined(cl_khr_gl_msaa_sharing) 15501 #endif //cl_khr_gl_msaa_sharing 15505 #ifdef cl_khr_depth_images 15508 #endif //cl_khr_depth_images 15509 #if defined(cl_khr_gl_msaa_sharing) 15514 #endif //cl_khr_gl_msaa_sharing 15516 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15519 #ifdef cl_khr_depth_images 15522 #endif //cl_khr_depth_images 15523 #if defined(cl_khr_gl_msaa_sharing) 15528 #endif //cl_khr_gl_msaa_sharing 15529 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15538 #ifdef cl_khr_3d_image_writes 15541 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15543 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15551 #ifdef cl_khr_depth_images 15553 #endif //cl_khr_depth_images 15554 #if defined(cl_khr_gl_msaa_sharing) 15557 #endif //cl_khr_gl_msaa_sharing 15561 #ifdef cl_khr_depth_images 15563 #endif //cl_khr_depth_images 15564 #if defined(cl_khr_gl_msaa_sharing) 15567 #endif //cl_khr_gl_msaa_sharing 15569 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15572 #ifdef cl_khr_depth_images 15574 #endif //cl_khr_depth_images 15575 #if defined(cl_khr_gl_msaa_sharing) 15578 #endif //cl_khr_gl_msaa_sharing 15579 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15584 #if defined(cl_khr_gl_msaa_sharing) 15585 int __ovld get_image_num_samples(read_only image2d_msaa_t image);
15586 int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
15587 int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
15588 int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
15589 int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
15591 int __ovld get_image_num_samples(write_only image2d_msaa_t image);
15592 int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
15593 int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
15594 int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
15595 int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
15597 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15598 int __ovld get_image_num_samples(read_write image2d_msaa_t image);
15599 int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
15600 int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
15601 int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
15602 int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
15603 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15608 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15636 #endif //cl_khr_fp64 15704 #endif //cl_khr_fp64 15706 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15709 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15710 #define PIPE_RESERVE_ID_VALID_BIT (1U << 30) 15711 #define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t)) 15713 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15717 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15719 #define CL_COMPLETE 0x0 15720 #define CL_RUNNING 0x1 15721 #define CL_SUBMITTED 0x2 15722 #define CL_QUEUED 0x3 15724 #define CLK_SUCCESS 0 15725 #define CLK_ENQUEUE_FAILURE -101 15726 #define CLK_INVALID_QUEUE -102 15727 #define CLK_INVALID_NDRANGE -160 15728 #define CLK_INVALID_EVENT_WAIT_LIST -57 15729 #define CLK_DEVICE_QUEUE_FULL -161 15730 #define CLK_INVALID_ARG_SIZE -51 15731 #define CLK_EVENT_ALLOCATION_FAILURE -100 15732 #define CLK_OUT_OF_RESOURCES -5 15734 #define CLK_NULL_QUEUE 0 15735 #define CLK_NULL_EVENT (__builtin_astype(((void*)(__SIZE_MAX__)), clk_event_t)) 15738 #define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0 15739 #define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1 15740 #define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2 15746 #define CLK_PROFILING_COMMAND_EXEC_TIME 0x1 15748 #define MAX_WORK_DIM 3 15784 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15788 #if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) 15793 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15794 uint __ovld get_enqueued_num_sub_groups(
void);
15795 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15799 void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
15800 #if __OPENCL_C_VERSION__ >= CL_VERSION_2_0 15801 void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
15802 #endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0 15811 float __ovld __conv sub_group_broadcast(
float x,
uint sub_group_local_id);
// --- Sub-group scan builtins (cl_khr_subgroups / cl_intel_subgroups) --------
// Exclusive then inclusive scans, for add / min / max, each overloaded on
// int, long, and float. NOTE(review): the unsigned (uint/ulong) overloads
// present in the upstream header (original lines 15830, 15832, ...) were
// dropped by the lossy extraction that produced this chunk, as were the
// leading numbers fused onto each declaration.
13829 int __ovld __conv sub_group_scan_exclusive_add(
int x);
15831 long __ovld __conv sub_group_scan_exclusive_add(
long x);
15833 float __ovld __conv sub_group_scan_exclusive_add(
float x);
15834 int __ovld __conv sub_group_scan_exclusive_min(
int x);
15836 long __ovld __conv sub_group_scan_exclusive_min(
long x);
15838 float __ovld __conv sub_group_scan_exclusive_min(
float x);
15839 int __ovld __conv sub_group_scan_exclusive_max(
int x);
15841 long __ovld __conv sub_group_scan_exclusive_max(
long x);
15843 float __ovld __conv sub_group_scan_exclusive_max(
float x);
// Inclusive variants (result includes the calling work-item's own value
// per the extension spec -- confirm against cl_khr_subgroups).
15845 int __ovld __conv sub_group_scan_inclusive_add(
int x);
15847 long __ovld __conv sub_group_scan_inclusive_add(
long x);
15849 float __ovld __conv sub_group_scan_inclusive_add(
float x);
15850 int __ovld __conv sub_group_scan_inclusive_min(
int x);
15852 long __ovld __conv sub_group_scan_inclusive_min(
long x);
15854 float __ovld __conv sub_group_scan_inclusive_min(
float x);
15855 int __ovld __conv sub_group_scan_inclusive_max(
int x);
15857 long __ovld __conv sub_group_scan_inclusive_max(
long x);
15859 float __ovld __conv sub_group_scan_inclusive_max(
float x);
15866 half
__ovld __conv sub_group_scan_exclusive_add(half x);
15867 half
__ovld __conv sub_group_scan_exclusive_min(half x);
15868 half
__ovld __conv sub_group_scan_exclusive_max(half x);
15869 half
__ovld __conv sub_group_scan_inclusive_add(half x);
15870 half
__ovld __conv sub_group_scan_inclusive_min(half x);
15871 half
__ovld __conv sub_group_scan_inclusive_max(half x);
15872 #endif //cl_khr_fp16 15875 double __ovld __conv sub_group_broadcast(
double x,
uint sub_group_local_id);
15879 double __ovld __conv sub_group_scan_exclusive_add(
double x);
15880 double __ovld __conv sub_group_scan_exclusive_min(
double x);
15881 double __ovld __conv sub_group_scan_exclusive_max(
double x);
15882 double __ovld __conv sub_group_scan_inclusive_add(
double x);
15883 double __ovld __conv sub_group_scan_inclusive_min(
double x);
15884 double __ovld __conv sub_group_scan_inclusive_max(
double x);
15885 #endif //cl_khr_fp64 15887 #endif //cl_khr_subgroups cl_intel_subgroups 15889 #if defined(cl_intel_subgroups) 15915 float __ovld __conv intel_sub_group_shuffle_down(
float cur,
float next,
uint c );
15916 float2
__ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next,
uint c );
15917 float3
__ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next,
uint c );
15918 float4
__ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next,
uint c );
15919 float8
__ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next,
uint c );
15920 float16
__ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next,
uint c );
15922 int __ovld __conv intel_sub_group_shuffle_down(
int cur,
int next,
uint c );
15923 int2
__ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next,
uint c );
15924 int3
__ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next,
uint c );
15925 int4
__ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next,
uint c );
15926 int8
__ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next,
uint c );
15927 int16
__ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next,
uint c );
15930 uint2
__ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next,
uint c );
15931 uint3
__ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next,
uint c );
15932 uint4
__ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next,
uint c );
15933 uint8
__ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next,
uint c );
15934 uint16
__ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next,
uint c );
15936 long __ovld __conv intel_sub_group_shuffle_down(
long prev,
long cur,
uint c );
15939 float __ovld __conv intel_sub_group_shuffle_up(
float prev,
float cur,
uint c );
15940 float2
__ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur,
uint c );
15941 float3
__ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur,
uint c );
15942 float4
__ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur,
uint c );
15943 float8
__ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur,
uint c );
15944 float16
__ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur,
uint c );
15946 int __ovld __conv intel_sub_group_shuffle_up(
int prev,
int cur,
uint c );
15947 int2
__ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur,
uint c );
15948 int3
__ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur,
uint c );
15949 int4
__ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur,
uint c );
15950 int8
__ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur,
uint c );
15951 int16
__ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur,
uint c );
15954 uint2
__ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur,
uint c );
15955 uint3
__ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur,
uint c );
15956 uint4
__ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur,
uint c );
15957 uint8
__ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur,
uint c );
15958 uint16
__ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur,
uint c );
15960 long __ovld __conv intel_sub_group_shuffle_up(
long prev,
long cur,
uint c );
15987 uint __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
15988 uint2
__ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
15989 uint4
__ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
15990 uint8
__ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
15992 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 15993 uint __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
15994 uint2
__ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
15995 uint4
__ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
15996 uint8
__ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
15997 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16000 uint2
__ovld __conv intel_sub_group_block_read2(
const __global
uint* p );
16001 uint4
__ovld __conv intel_sub_group_block_read4(
const __global
uint* p );
16002 uint8
__ovld __conv intel_sub_group_block_read8(
const __global
uint* p );
16004 void __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord,
uint data);
16005 void __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
16006 void __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
16007 void __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
16009 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16010 void __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord,
uint data);
16011 void __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
16012 void __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
16013 void __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
16014 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16017 void __ovld __conv intel_sub_group_block_write2( __global
uint* p, uint2 data );
16018 void __ovld __conv intel_sub_group_block_write4( __global
uint* p, uint4 data );
16019 void __ovld __conv intel_sub_group_block_write8( __global
uint* p, uint8 data );
16023 half
__ovld __conv intel_sub_group_shuffle_down( half prev, half cur,
uint c );
16024 half
__ovld __conv intel_sub_group_shuffle_up( half prev, half cur,
uint c );
16028 #if defined(cl_khr_fp64) 16030 double __ovld __conv intel_sub_group_shuffle_down(
double prev,
double cur,
uint c );
16031 double __ovld __conv intel_sub_group_shuffle_up(
double prev,
double cur,
uint c );
16035 #endif //cl_intel_subgroups 16037 #if defined(cl_intel_subgroups_short) 16038 short __ovld __conv intel_sub_group_broadcast(
short x,
uint sub_group_local_id );
16039 short2
__ovld __conv intel_sub_group_broadcast( short2 x,
uint sub_group_local_id );
16040 short3
__ovld __conv intel_sub_group_broadcast( short3 x,
uint sub_group_local_id );
16041 short4
__ovld __conv intel_sub_group_broadcast( short4 x,
uint sub_group_local_id );
16042 short8
__ovld __conv intel_sub_group_broadcast( short8 x,
uint sub_group_local_id );
16045 ushort2
__ovld __conv intel_sub_group_broadcast( ushort2 x,
uint sub_group_local_id );
16046 ushort3
__ovld __conv intel_sub_group_broadcast( ushort3 x,
uint sub_group_local_id );
16047 ushort4
__ovld __conv intel_sub_group_broadcast( ushort4 x,
uint sub_group_local_id );
16048 ushort8
__ovld __conv intel_sub_group_broadcast( ushort8 x,
uint sub_group_local_id );
16064 short __ovld __conv intel_sub_group_shuffle_down(
short cur,
short next,
uint c );
16065 short2
__ovld __conv intel_sub_group_shuffle_down( short2 cur, short2 next,
uint c );
16066 short3
__ovld __conv intel_sub_group_shuffle_down( short3 cur, short3 next,
uint c );
16067 short4
__ovld __conv intel_sub_group_shuffle_down( short4 cur, short4 next,
uint c );
16068 short8
__ovld __conv intel_sub_group_shuffle_down( short8 cur, short8 next,
uint c );
16069 short16
__ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next,
uint c );
16072 ushort2
__ovld __conv intel_sub_group_shuffle_down( ushort2 cur, ushort2 next,
uint c );
16073 ushort3
__ovld __conv intel_sub_group_shuffle_down( ushort3 cur, ushort3 next,
uint c );
16074 ushort4
__ovld __conv intel_sub_group_shuffle_down( ushort4 cur, ushort4 next,
uint c );
16075 ushort8
__ovld __conv intel_sub_group_shuffle_down( ushort8 cur, ushort8 next,
uint c );
16076 ushort16
__ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next,
uint c );
16078 short __ovld __conv intel_sub_group_shuffle_up(
short cur,
short next,
uint c );
16079 short2
__ovld __conv intel_sub_group_shuffle_up( short2 cur, short2 next,
uint c );
16080 short3
__ovld __conv intel_sub_group_shuffle_up( short3 cur, short3 next,
uint c );
16081 short4
__ovld __conv intel_sub_group_shuffle_up( short4 cur, short4 next,
uint c );
16082 short8
__ovld __conv intel_sub_group_shuffle_up( short8 cur, short8 next,
uint c );
16083 short16
__ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next,
uint c );
16086 ushort2
__ovld __conv intel_sub_group_shuffle_up( ushort2 cur, ushort2 next,
uint c );
16087 ushort3
__ovld __conv intel_sub_group_shuffle_up( ushort3 cur, ushort3 next,
uint c );
16088 ushort4
__ovld __conv intel_sub_group_shuffle_up( ushort4 cur, ushort4 next,
uint c );
16089 ushort8
__ovld __conv intel_sub_group_shuffle_up( ushort8 cur, ushort8 next,
uint c );
16090 ushort16
__ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next,
uint c );
16106 short __ovld __conv intel_sub_group_reduce_add(
short x );
16108 short __ovld __conv intel_sub_group_reduce_min(
short x );
16110 short __ovld __conv intel_sub_group_reduce_max(
short x );
16113 short __ovld __conv intel_sub_group_scan_exclusive_add(
short x );
16115 short __ovld __conv intel_sub_group_scan_exclusive_min(
short x );
16117 short __ovld __conv intel_sub_group_scan_exclusive_max(
short x );
16120 short __ovld __conv intel_sub_group_scan_inclusive_add(
short x );
16122 short __ovld __conv intel_sub_group_scan_inclusive_min(
short x );
16124 short __ovld __conv intel_sub_group_scan_inclusive_max(
short x );
16127 uint __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
16128 uint2
__ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
16129 uint4
__ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
16130 uint8
__ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
16132 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16133 uint __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
16134 uint2
__ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
16135 uint4
__ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
16136 uint8
__ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
16137 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16140 uint2
__ovld __conv intel_sub_group_block_read_ui2(
const __global
uint* p );
16141 uint4
__ovld __conv intel_sub_group_block_read_ui4(
const __global
uint* p );
16142 uint8
__ovld __conv intel_sub_group_block_read_ui8(
const __global
uint* p );
16144 void __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord,
uint data );
16145 void __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
16146 void __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
16147 void __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
16149 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16150 void __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord,
uint data );
16151 void __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
16152 void __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
16153 void __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
16154 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16157 void __ovld __conv intel_sub_group_block_write_ui2( __global
uint* p, uint2 data );
16158 void __ovld __conv intel_sub_group_block_write_ui4( __global
uint* p, uint4 data );
16159 void __ovld __conv intel_sub_group_block_write_ui8( __global
uint* p, uint8 data );
16161 ushort __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
16162 ushort2
__ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
16163 ushort4
__ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
16164 ushort8
__ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
16166 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16167 ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
16168 ushort2
__ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
16169 ushort4
__ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
16170 ushort8
__ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
16171 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16178 void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord,
ushort data);
16179 void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
16180 void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
16181 void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
16183 #if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16184 void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord,
ushort data);
16185 void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
16186 void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
16187 void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
16188 #endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0) 16191 void __ovld __conv intel_sub_group_block_write_us2( __global
ushort* p, ushort2 data );
16192 void __ovld __conv intel_sub_group_block_write_us4( __global
ushort* p, ushort4 data );
16193 void __ovld __conv intel_sub_group_block_write_us8( __global
ushort* p, ushort8 data );
16194 #endif // cl_intel_subgroups_short 16196 #ifdef cl_amd_media_ops 16198 uint2
__ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
16199 uint3
__ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
16200 uint4
__ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
16201 uint8
__ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
16202 uint16
__ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
16205 uint2
__ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
16206 uint3
__ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
16207 uint4
__ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
16208 uint8
__ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
16209 uint16
__ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
16212 uint2
__ovld amd_lerp(uint2 a, uint2 b, uint2 c);
16213 uint3
__ovld amd_lerp(uint3 a, uint3 b, uint3 c);
16214 uint4
__ovld amd_lerp(uint4 a, uint4 b, uint4 c);
16215 uint8
__ovld amd_lerp(uint8 a, uint8 b, uint8 c);
16216 uint16
__ovld amd_lerp(uint16 a, uint16 b, uint16 c);
16223 uint2
__ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
16224 uint3
__ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
16225 uint4
__ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
16226 uint8
__ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
16227 uint16
__ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
16230 uint2
__ovld amd_sad(uint2 a, uint2 b, uint2 c);
16231 uint3
__ovld amd_sad(uint3 a, uint3 b, uint3 c);
16232 uint4
__ovld amd_sad(uint4 a, uint4 b, uint4 c);
16233 uint8
__ovld amd_sad(uint8 a, uint8 b, uint8 c);
16234 uint16
__ovld amd_sad(uint16 a, uint16 b, uint16 c);
16237 float2
__ovld amd_unpack0(uint2 a);
16238 float3
__ovld amd_unpack0(uint3 a);
16239 float4
__ovld amd_unpack0(uint4 a);
16240 float8
__ovld amd_unpack0(uint8 a);
16241 float16
__ovld amd_unpack0(uint16 a);
16244 float2
__ovld amd_unpack1(uint2 a);
16245 float3
__ovld amd_unpack1(uint3 a);
16246 float4
__ovld amd_unpack1(uint4 a);
16247 float8
__ovld amd_unpack1(uint8 a);
16248 float16
__ovld amd_unpack1(uint16 a);
16251 float2
__ovld amd_unpack2(uint2 a);
16252 float3
__ovld amd_unpack2(uint3 a);
16253 float4
__ovld amd_unpack2(uint4 a);
16254 float8
__ovld amd_unpack2(uint8 a);
16255 float16
__ovld amd_unpack2(uint16 a);
16258 float2
__ovld amd_unpack3(uint2 a);
16259 float3
__ovld amd_unpack3(uint3 a);
16260 float4
__ovld amd_unpack3(uint4 a);
16261 float8
__ovld amd_unpack3(uint8 a);
16262 float16
__ovld amd_unpack3(uint16 a);
16263 #endif // cl_amd_media_ops 16265 #ifdef cl_amd_media_ops2 16267 int2
__ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
16268 int3
__ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
16269 int4
__ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
16270 int8
__ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
16271 int16
__ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
16274 uint2
__ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
16275 uint3
__ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
16276 uint4
__ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
16277 uint8
__ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
16278 uint16
__ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
16281 uint2
__ovld amd_bfm(uint2 src0, uint2 src1);
16282 uint3
__ovld amd_bfm(uint3 src0, uint3 src1);
16283 uint4
__ovld amd_bfm(uint4 src0, uint4 src1);
16284 uint8
__ovld amd_bfm(uint8 src0, uint8 src1);
16285 uint16
__ovld amd_bfm(uint16 src0, uint16 src1);
16287 float __ovld amd_max3(
float src0,
float src1,
float src2);
16288 float2
__ovld amd_max3(float2 src0, float2 src1, float2 src2);
16289 float3
__ovld amd_max3(float3 src0, float3 src1, float3 src2);
16290 float4
__ovld amd_max3(float4 src0, float4 src1, float4 src2);
16291 float8
__ovld amd_max3(float8 src0, float8 src1, float8 src2);
16292 float16
__ovld amd_max3(float16 src0, float16 src1, float16 src2);
16294 int __ovld amd_max3(
int src0,
int src1,
int src2);
16295 int2
__ovld amd_max3(int2 src0, int2 src1, int2 src2);
16296 int3
__ovld amd_max3(int3 src0, int3 src1, int3 src2);
16297 int4
__ovld amd_max3(int4 src0, int4 src1, int4 src2);
16298 int8
__ovld amd_max3(int8 src0, int8 src1, int8 src2);
16299 int16
__ovld amd_max3(int16 src0, int16 src1, int16 src2);
16302 uint2
__ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
16303 uint3
__ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
16304 uint4
__ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
16305 uint8
__ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
16306 uint16
__ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
16308 float __ovld amd_median3(
float src0,
float src1,
float src2);
16309 float2
__ovld amd_median3(float2 src0, float2 src1, float2 src2);
16310 float3
__ovld amd_median3(float3 src0, float3 src1, float3 src2);
16311 float4
__ovld amd_median3(float4 src0, float4 src1, float4 src2);
16312 float8
__ovld amd_median3(float8 src0, float8 src1, float8 src2);
16313 float16
__ovld amd_median3(float16 src0, float16 src1, float16 src2);
16315 int __ovld amd_median3(
int src0,
int src1,
int src2);
16316 int2
__ovld amd_median3(int2 src0, int2 src1, int2 src2);
16317 int3
__ovld amd_median3(int3 src0, int3 src1, int3 src2);
16318 int4
__ovld amd_median3(int4 src0, int4 src1, int4 src2);
16319 int8
__ovld amd_median3(int8 src0, int8 src1, int8 src2);
16320 int16
__ovld amd_median3(int16 src0, int16 src1, int16 src2);
16323 uint2
__ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
16324 uint3
__ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
16325 uint4
__ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
16326 uint8
__ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
16327 uint16
__ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
16329 float __ovld amd_min3(
float src0,
float src1,
float src);
16330 float2
__ovld amd_min3(float2 src0, float2 src1, float2 src);
16331 float3
__ovld amd_min3(float3 src0, float3 src1, float3 src);
16332 float4
__ovld amd_min3(float4 src0, float4 src1, float4 src);
16333 float8
__ovld amd_min3(float8 src0, float8 src1, float8 src);
16334 float16
__ovld amd_min3(float16 src0, float16 src1, float16 src);
16336 int __ovld amd_min3(
int src0,
int src1,
int src2);
16337 int2
__ovld amd_min3(int2 src0, int2 src1, int2 src2);
16338 int3
__ovld amd_min3(int3 src0, int3 src1, int3 src2);
16339 int4
__ovld amd_min3(int4 src0, int4 src1, int4 src2);
16340 int8
__ovld amd_min3(int8 src0, int8 src1, int8 src2);
16341 int16
__ovld amd_min3(int16 src0, int16 src1, int16 src2);
16344 uint2
__ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
16345 uint3
__ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
16346 uint4
__ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
16347 uint8
__ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
16348 uint16
__ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
16351 ulong2
__ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
16352 ulong3
__ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
16353 ulong4
__ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
16354 ulong8
__ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
16355 ulong16
__ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
16358 ulong2
__ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
16359 ulong3
__ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
16360 ulong4
__ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
16361 ulong8
__ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
16362 ulong16
__ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
16365 uint2
__ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
16366 uint3
__ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
16367 uint4
__ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
16368 uint8
__ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
16369 uint16
__ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
16372 uint2
__ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
16373 uint3
__ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
16374 uint4
__ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
16375 uint8
__ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
16376 uint16
__ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
16379 uint2
__ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
16380 uint3
__ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
16381 uint4
__ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
16382 uint8
__ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
16383 uint16
__ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
16384 #endif // cl_amd_media_ops2 16387 #pragma OPENCL EXTENSION all : disable 16391 #endif //_OPENCL_H_ uchar16 __ovld __cnfn convert_uchar16_rtn(char16)
void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order)
ushort __ovld __cnfn convert_ushort_sat_rtn(char)
uchar __ovld __cnfn convert_uchar_sat_rtz(char)
int16 __ovld __cnfn convert_int16_sat_rtp(char16)
short4 __ovld __cnfn convert_short4_rtp(char4)
float __ovld __cnfn logb(float x)
Compute the exponent of x, which is the integral part of log_r | x | (where r is the floating-point radix).
short16 __ovld __cnfn convert_short16_sat_rtp(char16)
float __ovld __cnfn erfc(float)
Complementary error function.
void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p)
uchar4 __ovld __cnfn convert_uchar4_rte(char4)
float __ovld __cnfn tanh(float)
Compute hyperbolic tangent.
uchar16 __ovld __cnfn convert_uchar16_sat_rte(char16)
ulong8 __ovld __cnfn convert_ulong8_rtz(char8)
float __ovld __cnfn minmag(float x, float y)
Returns x if | x | < | y |, y if | y | < | x |, otherwise fmin(x, y).
long4 __ovld __cnfn convert_long4_sat_rtz(char4)
float __ovld __cnfn half_divide(float x, float y)
Compute x / y.
ushort2 __ovld __cnfn convert_ushort2_rtp(char2)
long2 __ovld __cnfn convert_long2_sat_rtz(char2)
uint3 __ovld __cnfn convert_uint3(char3)
int2 __ovld __cnfn get_image_dim(read_only image2d_t image)
Return the 2D image width and height as an int2 type.
short3 __ovld __cnfn convert_short3_sat_rte(char3)
long __ovld __cnfn convert_long_sat_rte(char)
int16 __ovld __cnfn convert_int16_sat(char16)
float __ovld __cnfn trunc(float)
Round to integral value using the round to zero rounding mode.
int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order)
short16 __ovld __cnfn convert_short16_rte(char16)
uchar4 __ovld __cnfn convert_uchar4_sat_rte(char4)
ulong8 __ovld __cnfn convert_ulong8_rte(char8)
float __ovld __cnfn cospi(float x)
Compute cos (PI * x).
short16 __ovld __cnfn convert_short16_sat_rtn(char16)
float __ovld __cnfn remainder(float x, float y)
Compute the value r such that r = x - n*y, where n is the integer nearest the exact value of x/y...
float3 __ovld __cnfn convert_float3_rtn(char3)
void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color)
float __ovld __cnfn fmod(float x, float y)
Modulus.
float __ovld __cnfn native_rsqrt(float x)
Compute inverse square root over an implementation-defined range.
float __ovld __cnfn native_exp(float x)
Compute the base- e exponential of x over an implementation-defined range.
char2 __ovld __cnfn convert_char2_rtz(char2)
int __ovld __cnfn convert_int_rte(char)
ulong4 __ovld __cnfn convert_ulong4_sat_rtp(char4)
void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p)
uint3 __ovld __cnfn convert_uint3_sat_rtp(char3)
uint8 __ovld __cnfn convert_uint8_rtn(char8)
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected, int desired, memory_order success, memory_order failure)
void __ovld vstore_half8(float8 data, size_t offset, half *p)
uchar2 __ovld __cnfn convert_uchar2_sat(char2)
uint8 __ovld __cnfn convert_uint8_sat(char8)
void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order)
short2 __ovld __cnfn convert_short2_sat_rtz(char2)
__SIZE_TYPE__ size_t
The unsigned integer type of the result of the sizeof operator.
ulong3 __ovld __cnfn convert_ulong3_sat_rtp(char3)
int3 __ovld __cnfn convert_int3_sat_rtz(char3)
long4 __ovld __cnfn convert_long4_rtz(char4)
float __ovld __cnfn ceil(float)
Round to integral value using the round to positive infinity rounding mode.
ulong4 __ovld __cnfn convert_ulong4_sat_rtn(char4)
uchar3 __ovld __cnfn convert_uchar3_sat_rtz(char3)
long8 __ovld __cnfn convert_long8_rtn(char8)
int __ovld __cnfn get_image_width(read_only image1d_t image)
Return the image width in pixels.
char3 __ovld __cnfn convert_char3(char3)
float3 __ovld vload_half3(size_t offset, const __constant half *p)
int __ovld __cnfn mul24(int x, int y)
Multiply two 24-bit integer values x and y.
uchar __ovld __cnfn convert_uchar_rtz(char)
ushort16 __ovld __cnfn convert_ushort16_sat_rtn(char16)
int __ovld atomic_or(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p)
ushort __ovld __cnfn convert_ushort_rtn(char)
int __ovld __cnfn all(char x)
Returns 1 if the most significant bit in all components of x is set; otherwise returns 0...
float __ovld __cnfn native_powr(float x, float y)
Compute x to the power y, where x is >= 0.
char __ovld __cnfn convert_char_sat_rte(char)
void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p)
float __ovld __cnfn tgamma(float)
Compute the gamma function.
uint3 __ovld __cnfn convert_uint3_rte(char3)
ulong __ovld __cnfn convert_ulong_sat_rtp(char)
char4 __ovld __cnfn convert_char4_rtn(char4)
bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired)
long16 __ovld __cnfn convert_long16_sat_rtz(char16)
int __ovld __cnfn isgreater(float x, float y)
Returns the component-wise compare of x > y.
uchar3 __ovld __cnfn convert_uchar3_rtn(char3)
uint16 __ovld __cnfn convert_uint16_sat(char16)
ulong2 __ovld __cnfn convert_ulong2(char2)
short3 __ovld __cnfn convert_short3_sat_rtn(char3)
int __ovld __cnfn signbit(float)
Test for sign bit.
uint __ovld __cnfn convert_uint_sat_rte(char)
ushort3 __ovld __cnfn convert_ushort3_rte(char3)
int4 __ovld __cnfn convert_int4_sat_rtn(char4)
long __ovld __cnfn convert_long_rtp(char)
long3 __ovld __cnfn convert_long3_sat_rtz(char3)
float __ovld __cnfn tanpi(float x)
Compute tan (PI * x).
void __ovld vstorea_half_rtz(float data, size_t offset, half *p)
float8 __ovld __cnfn convert_float8_rte(char8)
float16 __ovld __cnfn convert_float16_rtz(char16)
char4 __ovld __cnfn convert_char4_sat_rtz(char4)
float __ovld __cnfn expm1(float x)
Compute e^x - 1.0.
int __ovld atomic_fetch_add(volatile atomic_int *object, int operand)
queue_t __ovld get_default_queue(void)
int __ovld __conv work_group_scan_inclusive_min(int x)
char16 __ovld vload16(size_t offset, const __constant char *p)
float __ovld __cnfn pown(float x, int y)
Compute x to the power y, where y is an integer.
ulong __ovld __cnfn convert_ulong_rtz(char)
float __ovld __cnfn fmax(float x, float y)
Returns y if x < y, otherwise it returns x.
float __ovld __cnfn nextafter(float x, float y)
Computes the next representable single-precision floating-point value following x in the direction of...
float __ovld __cnfn atan(float y_over_x)
Arc tangent function.
ushort16 __ovld __cnfn convert_ushort16_sat_rte(char16)
int2 __ovld __cnfn convert_int2_sat_rtp(char2)
int16 __ovld __cnfn convert_int16_rtp(char16)
uint3 __ovld __cnfn convert_uint3_rtn(char3)
long16 __ovld __cnfn convert_long16_sat(char16)
int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand)
int __ovld __conv work_group_scan_exclusive_min(int x)
uchar __ovld __cnfn convert_uchar_sat_rtp(char)
float __ovld __cnfn native_tan(float x)
Compute tangent over an implementation-defined range.
ushort3 __ovld __cnfn convert_ushort3_rtz(char3)
uchar3 __ovld __cnfn convert_uchar3_rtp(char3)
float __ovld __cnfn asinpi(float x)
Compute asin (x) / PI.
int __ovld atomic_min(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
uchar2 __ovld __cnfn convert_uchar2_sat_rte(char2)
char8 __ovld __cnfn convert_char8_rtp(char8)
float __ovld __cnfn cbrt(float)
Compute cube-root.
bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order)
char __ovld __cnfn clamp(char x, char minval, char maxval)
Returns min(max(x, minval), maxval).
uchar __ovld __cnfn convert_uchar_rtp(char)
float __ovld __cnfn log10(float)
Compute a base 10 logarithm.
uchar2 __ovld __cnfn convert_uchar2_rte(char2)
float __ovld __cnfn half_log10(float x)
Compute a base 10 logarithm.
ndrange_t __ovld ndrange_1D(size_t)
uint2 __ovld __cnfn convert_uint2_sat_rtz(char2)
float __ovld __cnfn native_exp10(float x)
Compute the base- 10 exponential of x over an implementation-defined range.
int __ovld atomic_xor(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
float __ovld __cnfn distance(float p0, float p1)
Returns the distance between p0 and p1.
uchar8 __ovld __cnfn convert_uchar8_rte(char8)
char8 __ovld __cnfn convert_char8_sat(char8)
char16 __ovld __cnfn convert_char16(char16)
float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord)
Use the coordinate (coord.xy) to do an element lookup in the 2D image object specified by image...
uchar8 __ovld __cnfn convert_uchar8_rtz(char8)
char16 __ovld __cnfn convert_char16_sat_rtp(char16)
char4 __ovld __cnfn convert_char4(char4)
size_t __ovld __cnfn get_global_id(uint dimindx)
Returns the unique global work-item ID value for dimension identified by dimindx. ...
float __ovld __cnfn half_sqrt(float x)
Compute square root.
ushort16 __ovld __cnfn convert_ushort16_rtn(char16)
uchar4 __ovld __cnfn convert_uchar4_rtz(char4)
void __ovld __conv barrier(cl_mem_fence_flags flags)
All work-items in a work-group executing the kernel on a processor must execute this function before ...
ulong2 __ovld __cnfn convert_ulong2_sat_rte(char2)
void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p)
float __ovld __cnfn native_log2(float x)
Compute a base 2 logarithm over an implementation-defined range.
float __ovld __cnfn radians(float degrees)
Converts degrees to radians, i.e.
size_t __ovld __cnfn get_group_id(uint dimindx)
get_group_id returns the work-group ID which is a number from 0 .
uint4 __ovld __cnfn convert_uint4_rtz(char4)
void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p)
void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void *value)
uint16 __ovld __cnfn convert_uint16(char16)
char16 __ovld __cnfn convert_char16_rtn(char16)
float __ovld __cnfn erf(float)
Error function encountered in integrating the normal distribution.
uint16 __ovld __cnfn convert_uint16_rte(char16)
float __ovld __cnfn asinh(float)
Inverse hyperbolic sine.
void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p)
ulong16 __ovld __cnfn convert_ulong16_sat_rtn(char16)
int2 __ovld __cnfn convert_int2_sat_rtz(char2)
char2 __ovld __cnfn convert_char2_sat(char2)
char __ovld __cnfn popcount(char x)
void __ovld vstore_half_rtz(float data, size_t offset, half *p)
uint2 __ovld __cnfn convert_uint2_rtz(char2)
int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
char __ovld __cnfn convert_char_sat_rtn(char)
void __ovld vstorea_half_rtp(float data, size_t offset, half *p)
long8 __ovld __cnfn convert_long8_sat(char8)
char __ovld __cnfn hadd(char x, char y)
Returns (x + y) >> 1.
void __ovld read_mem_fence(cl_mem_fence_flags flags)
Read memory barrier that orders only loads.
float2 __ovld __cnfn convert_float2_rtp(char2)
ulong4 __ovld __cnfn convert_ulong4_sat(char4)
ushort __ovld __cnfn convert_ushort_sat_rte(char)
void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p)
ulong4 __ovld __cnfn convert_ulong4_sat_rte(char4)
float __ovld __cnfn normalize(float p)
Returns a vector in the same direction as p but with a length of 1.
long8 __ovld __cnfn convert_long8_rtz(char8)
float __ovld __cnfn copysign(float x, float y)
Returns x with its sign changed to match the sign of y.
void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color)
unsigned short ushort
An unsigned 16-bit integer.
float __ovld __cnfn mad(float a, float b, float c)
mad approximates a * b + c.
float __ovld __cnfn half_rsqrt(float x)
Compute inverse square root.
uchar2 __ovld __cnfn convert_uchar2_sat_rtn(char2)
ushort16 __ovld __cnfn convert_ushort16_rtz(char16)
char2 __ovld __cnfn convert_char2_rte(char2)
int8 __ovld __cnfn convert_int8_rtz(char8)
long2 __ovld __cnfn convert_long2_rtp(char2)
int3 __ovld __cnfn convert_int3_rte(char3)
long2 __ovld __cnfn convert_long2_rtz(char2)
uint __ovld __cnfn convert_uint_sat_rtz(char)
char8 __ovld vload8(size_t offset, const __constant char *p)
uchar3 __ovld __cnfn convert_uchar3_sat_rtp(char3)
int __ovld __cnfn mad24(int x, int y, int z)
Multiply two 24-bit integer values x and y and add the 32-bit integer result to the 32-bit integer z...
ushort __ovld __cnfn convert_ushort_rtp(char)
size_t __ovld get_global_linear_id(void)
void __ovld prefetch(const __global char *p, size_t num_elements)
Prefetch num_elements * sizeof(gentype) bytes into the global cache.
char2 __ovld __cnfn convert_char2_sat_rte(char2)
ushort2 __ovld __cnfn convert_ushort2_sat_rtp(char2)
ndrange_t __ovld ndrange_2D(const size_t[2])
void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p)
ulong2 __ovld __cnfn convert_ulong2_sat_rtz(char2)
uchar8 __ovld __cnfn convert_uchar8_sat(char8)
clk_event_t __ovld create_user_event(void)
int __ovld __cnfn ilogb(float x)
Return the exponent as an integer value.
float __ovld __cnfn sin(float)
Compute sine.
short __ovld __cnfn convert_short_rtz(char)
uint4 __ovld __cnfn convert_uint4(char4)
bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired)
ulong3 __ovld __cnfn convert_ulong3(char3)
uint __ovld __cnfn convert_uint_sat_rtn(char)
int __ovld __conv work_group_reduce_max(int x)
short8 __ovld __cnfn convert_short8_rtp(char8)
short2 __ovld __cnfn convert_short2_sat(char2)
float __ovld __cnfn native_exp2(float x)
Compute the base-2 exponential of x over an implementation-defined range.
ulong __ovld __cnfn convert_ulong_sat_rtn(char)
int __ovld __cnfn isfinite(float)
Test for finite value.
short8 __ovld __cnfn convert_short8_sat_rtp(char8)
int8 __ovld __cnfn convert_int8_rtn(char8)
char8 __ovld __cnfn convert_char8_rte(char8)
uchar __ovld __cnfn convert_uchar(char)
ulong16 __ovld __cnfn convert_ulong16_sat_rte(char16)
float __ovld __cnfn nan(uint nancode)
Returns a quiet NaN.
int __ovld __cnfn islessequal(float x, float y)
Returns the component-wise compare of x <= y.
ulong4 __ovld __cnfn convert_ulong4_rtz(char4)
void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p)
char __ovld __cnfn convert_char_sat_rtz(char)
uchar4 __ovld __cnfn convert_uchar4_rtp(char4)
int __ovld __cnfn isunordered(float x, float y)
Test if arguments are unordered.
char4 __ovld __cnfn convert_char4_sat_rtp(char4)
void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p)
int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand)
ulong2 __ovld __cnfn convert_ulong2_rtz(char2)
long3 __ovld __cnfn convert_long3_rtp(char3)
char __ovld __cnfn mad_hi(char a, char b, char c)
Returns mul_hi(a, b) + c.
uchar3 __ovld __cnfn convert_uchar3_sat(char3)
ulong __ovld __cnfn convert_ulong_sat_rte(char)
[NOTE: garbled fragment, apparently from an unrelated OpenMP runtime doxygen comment — verify against the original source] \param SharedsTy A type which contains references to the shared variables. \param Shareds Context with the list of shared variables from the TaskFunction. \param Data Additional data for task generation like final state.
uint __ovld __cnfn convert_uint_rte(char)
long8 __ovld __cnfn convert_long8_sat_rte(char8)
long8 __ovld __cnfn convert_long8(char8)
uint16 __ovld __cnfn convert_uint16_rtz(char16)
long __ovld __cnfn convert_long_sat_rtp(char)
void __ovld vstore3(char3 data, size_t offset, char *p)
uint4 __ovld __cnfn convert_uint4_rte(char4)
float __ovld __cnfn log2(float)
Compute a base 2 logarithm.
uchar16 __ovld __cnfn convert_uchar16_rtz(char16)
uchar4 __ovld __cnfn convert_uchar4_sat(char4)
float16 __ovld __cnfn convert_float16_rtn(char16)
uchar8 __ovld __cnfn convert_uchar8_sat_rte(char8)
float __ovld __cnfn powr(float x, float y)
Compute x to the power y, where x is >= 0.
uint __ovld __cnfn convert_uint_rtp(char)
short16 __ovld __cnfn convert_short16_rtn(char16)
float8 __ovld vloada_half8(size_t offset, const __constant half *p)
short16 __ovld __cnfn convert_short16_rtz(char16)
void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p)
ulong4 __ovld __cnfn convert_ulong4_sat_rtz(char4)
float __ovld __cnfn round(float x)
Return the integral value nearest to x rounding halfway cases away from zero, regardless of the curre...
int __ovld __cnfn isless(float x, float y)
Returns the component-wise compare of x < y.
uchar16 __ovld __cnfn convert_uchar16_sat_rtn(char16)
uint16 __ovld __cnfn convert_uint16_sat_rte(char16)
float __ovld __cnfn tan(float)
Compute tangent.
char4 __ovld __cnfn convert_char4_rte(char4)
void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p)
void __ovld vstore_half_rtp(float data, size_t offset, half *p)
short8 __ovld __cnfn convert_short8_sat_rtz(char8)
ushort3 __ovld __cnfn convert_ushort3(char3)
void __ovld write_mem_fence(cl_mem_fence_flags flags)
Write memory barrier that orders only stores.
uint3 __ovld __cnfn convert_uint3_sat(char3)
int __ovld atomic_xchg(volatile __global int *p, int val)
Swaps the old value stored at location p with new value given by val.
float4 __ovld __cnfn convert_float4_rtp(char4)
ulong16 __ovld __cnfn convert_ulong16_rtz(char16)
int4 __ovld __cnfn convert_int4_sat(char4)
int __ovld __conv work_group_scan_exclusive_max(int x)
float __ovld sincos(float x, float *cosval)
Compute sine and cosine of x.
float __ovld __cnfn rint(float)
Round to integral value (using round to nearest even rounding mode) in floating-point format...
long3 __ovld __cnfn convert_long3_rte(char3)
void __ovld vstore16(char16 data, size_t offset, char *p)
ushort4 __ovld __cnfn convert_ushort4_sat_rtp(char4)
long16 __ovld __cnfn convert_long16_sat_rtp(char16)
ulong __ovld __cnfn convert_ulong_rtp(char)
uchar __ovld __cnfn convert_uchar_sat_rte(char)
short __ovld __cnfn convert_short_sat(char)
int4 __ovld __cnfn convert_int4_sat_rtz(char4)
uint16 __ovld __cnfn convert_uint16_rtp(char16)
bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object)
int4 __ovld __cnfn convert_int4_rtp(char4)
float __ovld __cnfn degrees(float radians)
Converts radians to degrees, i.e. (180 / PI) * radians.
int __ovld __conv work_group_scan_inclusive_max(int x)
void __ovld vstore2(char2 data, size_t offset, char *p)
ushort3 __ovld __cnfn convert_ushort3_sat(char3)
long16 __ovld __cnfn convert_long16_rtp(char16)
void __ovld vstore_half_rte(float data, size_t offset, half *p)
void __ovld vstorea_half4(float4 data, size_t offset, half *p)
long3 __ovld __cnfn convert_long3(char3)
long8 __ovld __cnfn convert_long8_rtp(char8)
float4 __ovld __cnfn convert_float4(char4)
int4 __ovld __cnfn convert_int4_sat_rtp(char4)
uchar2 __ovld __cnfn convert_uchar2_rtp(char2)
ulong2 __ovld __cnfn convert_ulong2_rtn(char2)
char16 __ovld __cnfn convert_char16_rtz(char16)
uchar8 __ovld __cnfn convert_uchar8(char8)
uint4 __ovld __cnfn convert_uint4_sat_rte(char4)
int __ovld __cnfn convert_int(char)
void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p)
int2 __ovld __cnfn convert_int2_sat_rte(char2)
char __ovld __cnfn convert_char_rte(char)
void __ovld vstore4(char4 data, size_t offset, char *p)
float3 __ovld __cnfn convert_float3_rtz(char3)
ulong16 __ovld __cnfn convert_ulong16_rtn(char16)
int16 __ovld __cnfn convert_int16_sat_rtz(char16)
ushort8 __ovld __cnfn convert_ushort8_sat(char8)
int __ovld __conv work_group_broadcast(int a, size_t local_id)
char3 __ovld __cnfn convert_char3_sat(char3)
int __ovld __cnfn isequal(float x, float y)
intn isequal (floatn x, floatn y) Returns the component-wise compare of x == y.
void __ovld vstore8(char8 data, size_t offset, char *p)
float __ovld __cnfn log1p(float x)
Compute a base e logarithm of (1.0 + x).
char8 __ovld __cnfn convert_char8_rtz(char8)
char __ovld __cnfn clz(char x)
Returns the number of leading 0-bits in x, starting at the most significant bit position.
float __ovld __cnfn exp10(float)
Exponential base 10 function.
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type...
float __ovld __cnfn half_exp10(float x)
Compute the base-10 exponential of x.
int3 __ovld __cnfn convert_int3_rtz(char3)
ushort3 __ovld __cnfn convert_ushort3_sat_rtn(char3)
int8 __ovld __cnfn convert_int8_rtp(char8)
short3 __ovld __cnfn convert_short3_sat_rtp(char3)
event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event)
Builtin functions to_global, to_local, and to_private need to be declared as Clang builtin functions ...
char16 __ovld __cnfn convert_char16_sat_rtn(char16)
int __ovld atomic_fetch_or(volatile atomic_int *object, int operand)
int __ovld atomic_fetch_min(volatile atomic_int *object, int operand)
uint2 __ovld __cnfn convert_uint2_sat_rtn(char2)
ulong8 __ovld __cnfn convert_ulong8_rtp(char8)
ushort4 __ovld __cnfn convert_ushort4_rte(char4)
int4 __ovld __cnfn convert_int4(char4)
uint __ovld __cnfn convert_uint(char)
void __ovld vstore_half_rtn(float data, size_t offset, half *p)
int2 __ovld __cnfn convert_int2_rtp(char2)
long2 __ovld __cnfn convert_long2_sat_rtn(char2)
float __ovld __cnfn convert_float_rtz(char)
int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord)
void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p)
char __ovld __cnfn mul_hi(char x, char y)
Computes x * y and returns the high half of the product of x and y.
uchar3 __ovld __cnfn convert_uchar3_sat_rtn(char3)
int __ovld __conv work_group_all(int predicate)
Return the number of samples associated with image.
int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order)
short8 __ovld __cnfn convert_short8_rtn(char8)
void __ovld vstore_half16(float16 data, size_t offset, half *p)
void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p)
long2 __ovld __cnfn convert_long2_sat_rte(char2)
int3 __ovld __cnfn convert_int3_sat(char3)
float __ovld __cnfn log(float)
Compute natural logarithm.
uint3 __ovld __cnfn convert_uint3_sat_rtn(char3)
int __ovld __cnfn isnotequal(float x, float y)
Returns the component-wise compare of x != y.
ushort4 __ovld __cnfn convert_ushort4_sat_rtn(char4)
long8 __ovld __cnfn convert_long8_sat_rtn(char8)
uint8 __ovld __cnfn convert_uint8_rtp(char8)
uchar8 __ovld __cnfn convert_uchar8_rtn(char8)
ushort16 __ovld __cnfn convert_ushort16_sat(char16)
float __ovld modf(float x, float *iptr)
Decompose a floating-point number.
size_t __ovld get_enqueued_local_size(uint dimindx)
ushort2 __ovld __cnfn convert_ushort2_rte(char2)
float4 __ovld __cnfn cross(float4 p0, float4 p1)
Returns the cross product of p0.xyz and p1.xyz.
float3 __ovld __cnfn convert_float3_rte(char3)
int3 __ovld __cnfn convert_int3(char3)
void __ovld mem_fence(cl_mem_fence_flags flags)
Orders loads and stores of a work-item executing a kernel.
ushort3 __ovld __cnfn convert_ushort3_rtn(char3)
ushort4 __ovld __cnfn convert_ushort4_rtn(char4)
long3 __ovld __cnfn convert_long3_rtn(char3)
float16 __ovld __cnfn convert_float16_rtp(char16)
char2 __ovld __cnfn convert_char2(char2)
char2 __ovld __cnfn shuffle(char2 x, uchar2 mask)
The shuffle and shuffle2 built-in functions construct a permutation of elements from one or two input...
float __ovld __cnfn exp(float x)
Compute the base e exponential function of x.
int8 __ovld __cnfn convert_int8(char8)
float4 __ovld vloada_half4(size_t offset, const __constant half *p)
char __ovld __cnfn mad_sat(char a, char b, char c)
Returns a * b + c and saturates the result.
int2 __ovld __cnfn convert_int2_rtn(char2)
float2 __ovld vloada_half2(size_t offset, const __constant half *p)
short __ovld __cnfn convert_short_rtp(char)
int __ovld atomic_inc(volatile __global int *p)
Read the 32-bit value (referred to as old) stored at location pointed by p.
int __ovld atomic_and(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
float __ovld fract(float x, float *iptr)
Returns fmin(x - floor(x), 0x1.fffffep-1f).
uchar __ovld __cnfn convert_uchar_rtn(char)
short4 __ovld __cnfn convert_short4_rtn(char4)
uchar2 __ovld __cnfn convert_uchar2_sat_rtp(char2)
char4 __ovld vload4(size_t offset, const __constant char *p)
float __ovld __cnfn smoothstep(float edge0, float edge1, float x)
Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and performs smooth Hermite interpolation between 0 a...
uint2 __ovld __cnfn convert_uint2_sat(char2)
ushort __ovld __cnfn convert_ushort_sat_rtp(char)
uint __ovld __cnfn convert_uint_rtn(char)
float __ovld __cnfn convert_float_rtn(char)
char4 __ovld __cnfn convert_char4_sat_rtn(char4)
void __ovld vstore_half3(float3 data, size_t offset, half *p)
char2 __ovld __cnfn convert_char2_rtn(char2)
size_t __ovld __cnfn get_local_id(uint dimindx)
Returns the unique local work-item ID i.e.
char __ovld __cnfn convert_char_rtp(char)
uint2 __ovld __cnfn convert_uint2_sat_rtp(char2)
float __ovld __cnfn acos(float)
Arc cosine function.
uchar8 __ovld __cnfn convert_uchar8_sat_rtp(char8)
void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color)
Write color value to location specified by coordinate (coord.x, coord.y) in the 2D image object speci...
uint4 __ovld __cnfn convert_uint4_sat_rtz(char4)
char16 __ovld __cnfn convert_char16_sat(char16)
int __ovld __cnfn islessgreater(float x, float y)
Returns the component-wise compare of (x < y) || (x > y) .
long16 __ovld __cnfn convert_long16_sat_rte(char16)
uchar4 __ovld __cnfn convert_uchar4_rtn(char4)
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope)
short4 __ovld __cnfn convert_short4(char4)
float8 __ovld vload_half8(size_t offset, const __constant half *p)
ushort16 __ovld __cnfn convert_ushort16_sat_rtp(char16)
ushort __ovld __cnfn convert_ushort_rte(char)
float8 __ovld __cnfn convert_float8(char8)
char2 __ovld __cnfn convert_char2_rtp(char2)
short3 __ovld __cnfn convert_short3_rte(char3)
long16 __ovld __cnfn convert_long16_rte(char16)
ushort8 __ovld __cnfn convert_ushort8_rtp(char8)
uchar4 __ovld __cnfn convert_uchar4_sat_rtz(char4)
float __ovld __cnfn rsqrt(float)
Compute inverse square root.
short16 __ovld __cnfn convert_short16_rtp(char16)
ulong16 __ovld __cnfn convert_ulong16_sat_rtp(char16)
void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope)
float2 __ovld __cnfn convert_float2(char2)
uint3 __ovld __cnfn convert_uint3_rtz(char3)
float __ovld __cnfn fabs(float)
Compute absolute value of a floating-point number.
char16 __ovld __cnfn convert_char16_rtp(char16)
uchar2 __ovld __cnfn convert_uchar2_sat_rtz(char2)
float2 __ovld vload_half2(size_t offset, const __constant half *p)
Read sizeof (halfn) bytes of data from address (p + (offset * n)).
long __ovld __cnfn convert_long_rtz(char)
void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p)
int3 __ovld __cnfn convert_int3_rtp(char3)
int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order)
ulong4 __ovld __cnfn convert_ulong4_rtp(char4)
short3 __ovld __cnfn convert_short3_rtn(char3)
size_t __ovld __cnfn get_global_size(uint dimindx)
Returns the number of global work-items specified for dimension identified by dimindx.
ulong16 __ovld __cnfn convert_ulong16_sat(char16)
void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p)
uchar __ovld __cnfn abs_diff(char x, char y)
Returns | x - y | without modulo overflow.
long __ovld __cnfn convert_long_sat_rtz(char)
ulong16 __ovld __cnfn convert_ulong16(char16)
short3 __ovld __cnfn convert_short3_sat(char3)
void __ovld vstore_half16_rte(float16 data, size_t offset, half *p)
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
uchar3 __ovld __cnfn convert_uchar3_rtz(char3)
uchar __ovld __cnfn convert_uchar_rte(char)
long4 __ovld __cnfn convert_long4_sat_rtn(char4)
int __ovld __conv work_group_reduce_min(int x)
unsigned int workDimension
cl_mem_fence_flags __ovld get_fence(const void *ptr)
char3 __ovld __cnfn convert_char3_sat_rtp(char3)
ushort4 __ovld __cnfn convert_ushort4(char4)
long2 __ovld __cnfn convert_long2_rte(char2)
int2 __ovld __cnfn convert_int2(char2)
long __ovld __cnfn convert_long(char)
float __ovld __cnfn native_recip(float x)
Compute reciprocal over an implementation-defined range.
void __ovld vstore_half3_rte(float3 data, size_t offset, half *p)
float __ovld __cnfn asin(float)
Arc sine function.
ulong16 __ovld __cnfn convert_ulong16_rte(char16)
long4 __ovld __cnfn convert_long4_sat(char4)
ushort16 __ovld __cnfn convert_ushort16(char16)
float16 __ovld vload_half16(size_t offset, const __constant half *p)
short3 __ovld __cnfn convert_short3(char3)
void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p)
int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order)
uchar __ovld __cnfn abs(char x)
Returns | x |.
int8 __ovld __cnfn convert_int8_sat(char8)
int2 __ovld __cnfn convert_int2_rtz(char2)
uint8 __ovld __cnfn convert_uint8_sat_rtz(char8)
float __ovld __cnfn native_sqrt(float x)
Compute square root over an implementation-defined range.
int __ovld atomic_add(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p.
float8 __ovld __cnfn convert_float8_rtz(char8)
ushort2 __ovld __cnfn convert_ushort2_sat_rte(char2)
int __ovld __cnfn convert_int_rtz(char)
ulong3 __ovld __cnfn convert_ulong3_rte(char3)
ulong3 __ovld __cnfn convert_ulong3_rtz(char3)
char3 __ovld __cnfn convert_char3_rtz(char3)
uint __ovld __cnfn get_work_dim(void)
Returns the number of dimensions in use.
long2 __ovld __cnfn convert_long2_sat(char2)
float __ovld __cnfn length(float p)
Return the length of vector p, i.e., sqrt(p.x^2 + p.y^2 + ...)
float __ovld __cnfn acosh(float)
Inverse hyperbolic cosine.
ulong __ovld __cnfn convert_ulong_rtn(char)
short __ovld __cnfn convert_short_sat_rtz(char)
__UINTPTR_TYPE__ uintptr_t
An unsigned integer type with the property that any valid pointer to void can be converted to this ty...
uint2 __ovld __cnfn convert_uint2_rtn(char2)
float __ovld __cnfn hypot(float x, float y)
Compute the value of the square root of x^2 + y^2 without undue overflow or underflow.
uint8 __ovld __cnfn convert_uint8_rte(char8)
float __ovld __cnfn atan2(float y, float x)
Arc tangent of y / x.
float __ovld remquo(float x, float y, int *quo)
The remquo function computes the value r such that r = x - n*y, where n is the integer nearest the ex...
int __ovld atomic_exchange(volatile atomic_int *object, int desired)
char8 __ovld __cnfn convert_char8_rtn(char8)
uint8 __ovld __cnfn convert_uint8_sat_rte(char8)
void __ovld vstorea_half3(float3 data, size_t offset, half *p)
int16 __ovld __cnfn convert_int16_rte(char16)
char16 __ovld __cnfn convert_char16_rte(char16)
long __ovld __cnfn convert_long_rtn(char)
ushort4 __ovld __cnfn convert_ushort4_sat(char4)
int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order)
ushort2 __ovld __cnfn convert_ushort2_sat_rtn(char2)
char4 __ovld __cnfn convert_char4_sat(char4)
char16 __ovld __cnfn convert_char16_sat_rte(char16)
float __ovld __cnfn acospi(float x)
Compute acos (x) / PI.
uint16 __ovld __cnfn convert_uint16_rtn(char16)
float2 __ovld __cnfn convert_float2_rtn(char2)
float __ovld __cnfn half_exp2(float x)
Compute the base-2 exponential of x.
float4 __ovld __cnfn convert_float4_rte(char4)
long4 __ovld __cnfn convert_long4_rte(char4)
uint16 __ovld __cnfn convert_uint16_sat_rtp(char16)
uint3 __ovld __cnfn convert_uint3_sat_rte(char3)
uint4 __ovld __cnfn convert_uint4_sat(char4)
int3 __ovld __cnfn convert_int3_sat_rte(char3)
ulong3 __ovld __cnfn convert_ulong3_rtp(char3)
float __ovld __cnfn native_log(float x)
Compute natural logarithm over an implementation-defined range.
float __ovld __cnfn rootn(float x, int y)
Compute x to the power 1/y.
void __ovld vstorea_half_rte(float data, size_t offset, half *p)
short __ovld __cnfn upsample(char hi, uchar lo)
result[i] = ((short)hi[i] << 8) | lo[i] result[i] = ((ushort)hi[i] << 8) | lo[i]
char __ovld __cnfn bitselect(char a, char b, char c)
Each bit of the result is the corresponding bit of a if the corresponding bit of c is 0...
short __ovld __cnfn convert_short_rtn(char)
void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p)
int __ovld __cnfn convert_int_sat_rtn(char)
void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p)
char3 __ovld __cnfn convert_char3_sat_rte(char3)
uchar16 __ovld __cnfn convert_uchar16(char16)
uint4 __ovld __cnfn convert_uint4_sat_rtp(char4)
ushort8 __ovld __cnfn convert_ushort8(char8)
short8 __ovld __cnfn convert_short8(char8)
float __ovld __cnfn mix(float x, float y, float a)
Returns the linear blend of x & y implemented as: x + (y - x) * a a must be a value in the range 0...
char3 __ovld __cnfn convert_char3_sat_rtz(char3)
float __ovld __cnfn fmin(float x, float y)
Returns y if y < x, otherwise it returns x.
float4 __ovld __cnfn convert_float4_rtz(char4)
ushort16 __ovld __cnfn convert_ushort16_rte(char16)
void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p)
uchar2 __ovld __cnfn convert_uchar2(char2)
ulong __ovld __cnfn convert_ulong_sat_rtz(char)
char __ovld ctz(char x)
Returns the count of trailing 0-bits in x.
int8 __ovld __cnfn convert_int8_sat_rtp(char8)
ulong2 __ovld __cnfn convert_ulong2_rtp(char2)
char3 __ovld __cnfn convert_char3_rte(char3)
uint4 __ovld __cnfn convert_uint4_sat_rtn(char4)
char4 __ovld __cnfn convert_char4_rtz(char4)
bool __ovld is_valid_event(clk_event_t event)
ushort8 __ovld __cnfn convert_ushort8_sat_rte(char8)
uchar4 __ovld __cnfn convert_uchar4_sat_rtn(char4)
void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p)
float __ovld __cnfn sinh(float)
Compute hyperbolic sine.
char2 __ovld vload2(size_t offset, const __constant char *p)
Use generic type gentype to indicate the built-in data types char, uchar, short, ushort, int, uint, long, ulong, float, double or half.
int __ovld __conv work_group_reduce_add(int x)
long16 __ovld __cnfn convert_long16_rtn(char16)
ushort3 __ovld __cnfn convert_ushort3_sat_rtp(char3)
ulong8 __ovld __cnfn convert_ulong8_rtn(char8)
long4 __ovld __cnfn convert_long4_sat_rtp(char4)
int __ovld __conv work_group_any(int predicate)
int __ovld __cnfn any(char x)
Returns 1 if the most significant bit in any component of x is set; otherwise returns 0...
void __ovld set_user_event_status(clk_event_t e, int state)
char __ovld __cnfn convert_char(char)
unsigned int uint
An unsigned 32-bit integer.
int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order)
short16 __ovld __cnfn convert_short16(char16)
uchar16 __ovld __cnfn convert_uchar16_rte(char16)
float __ovld __cnfn half_cos(float x)
Compute cosine.
size_t __ovld __cnfn get_num_groups(uint dimindx)
Returns the number of work-groups that will execute a kernel for dimension identified by dimindx...
ushort3 __ovld __cnfn convert_ushort3_sat_rte(char3)
float __ovld __cnfn native_sin(float x)
Compute sine over an implementation-defined range.
void __ovld vstorea_half16(float16 data, size_t offset, half *p)
float __ovld __cnfn native_log10(float x)
Compute a base 10 logarithm over an implementation-defined range.
short8 __ovld __cnfn convert_short8_rte(char8)
ushort4 __ovld __cnfn convert_ushort4_sat_rtz(char4)
size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array)
Return the image array size.
float __ovld frexp(float x, int *exp)
Extract mantissa and exponent from x.
long2 __ovld __cnfn convert_long2_sat_rtp(char2)
char8 __ovld __cnfn convert_char8_sat_rte(char8)
float __ovld __cnfn maxmag(float x, float y)
Returns x if | x | > | y |, y if | y | > | x |, otherwise fmax(x, y).
int __ovld __cnfn get_image_height(read_only image2d_t image)
Return the image height in pixels.
char3 __ovld __cnfn convert_char3_rtp(char3)
uint4 __ovld __cnfn convert_uint4_rtp(char4)
uchar8 __ovld __cnfn convert_uchar8_sat_rtz(char8)
char3 __ovld vload3(size_t offset, const __constant char *p)
int __ovld __conv work_group_scan_inclusive_add(int x)
uint2 __ovld __cnfn convert_uint2_rte(char2)
int __ovld atomic_fetch_max(volatile atomic_int *object, int operand)
char __ovld __cnfn select(char a, char b, char c)
For each component of a vector type, result[i] = if MSB of c[i] is set ? b[i] : a[i].
ulong __ovld __cnfn convert_ulong_sat(char)
void __ovld atomic_init(volatile atomic_int *object, int value)
float __ovld __cnfn sqrt(float)
Compute square root.
int __ovld __cnfn get_image_depth(read_only image3d_t image)
Return the image depth in pixels.
uint __ovld __cnfn convert_uint_rtz(char)
long3 __ovld __cnfn convert_long3_sat_rtp(char3)
void __ovld vstore_half4(float4 data, size_t offset, half *p)
float __ovld __cnfn fast_normalize(float p)
Returns a vector in the same direction as p but with a length of 1.
uint3 __ovld __cnfn convert_uint3_sat_rtz(char3)
long __ovld __cnfn convert_long_sat_rtn(char)
char __ovld __cnfn rotate(char v, char i)
For each element in v, the bits are shifted left by the number of bits given by the corresponding ele...
char16 __ovld __cnfn convert_char16_sat_rtz(char16)
short2 __ovld __cnfn convert_short2(char2)
uint __ovld __cnfn convert_uint_sat(char)
float __ovld __cnfn native_cos(float x)
Compute cosine over an implementation-defined range.
ulong8 __ovld __cnfn convert_ulong8_sat_rtz(char8)
ulong16 __ovld __cnfn convert_ulong16_rtp(char16)
char8 __ovld __cnfn convert_char8(char8)
__PTRDIFF_TYPE__ ptrdiff_t
A signed integer type that is the result of subtracting two pointers.
uchar3 __ovld __cnfn convert_uchar3_sat_rte(char3)
float __ovld __cnfn ldexp(float x, int n)
Multiply x by 2 to the power n.
ushort2 __ovld __cnfn convert_ushort2(char2)
size_t __ovld __cnfn get_global_offset(uint dimindx)
get_global_offset returns the offset values specified in global_work_offset argument to clEnqueueNDRa...
short16 __ovld __cnfn convert_short16_sat_rtz(char16)
uint2 __ovld __cnfn convert_uint2(char2)
ulong2 __ovld __cnfn convert_ulong2_rte(char2)
int16 __ovld __cnfn convert_int16_rtz(char16)
int8 __ovld __cnfn convert_int8_rte(char8)
uchar3 __ovld __cnfn convert_uchar3(char3)
ulong4 __ovld __cnfn convert_ulong4(char4)
int2 __ovld __cnfn convert_int2_rte(char2)
ulong8 __ovld __cnfn convert_ulong8_sat(char8)
float __ovld __cnfn convert_float_rte(char)
void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p)
float3 __ovld __cnfn convert_float3(char3)
float __ovld __cnfn half_sin(float x)
Compute sine.
uchar16 __ovld __cnfn convert_uchar16_rtp(char16)
uint2 __ovld __cnfn convert_uint2_rtp(char2)
char8 __ovld __cnfn convert_char8_sat_rtn(char8)
ulong3 __ovld __cnfn convert_ulong3_sat_rtz(char3)
short2 __ovld __cnfn convert_short2_rtp(char2)
ulong3 __ovld __cnfn convert_ulong3_sat(char3)
float __ovld vload_half(size_t offset, const __constant half *p)
Read sizeof (half) bytes of data from address (p + offset).
int2 __ovld __cnfn convert_int2_sat(char2)
int __ovld __conv work_group_scan_exclusive_add(int x)
ushort2 __ovld __cnfn convert_ushort2_rtn(char2)
void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p)
int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image)
uchar3 __ovld __cnfn convert_uchar3_rte(char3)
short __ovld __cnfn convert_short_sat_rtn(char)
ushort8 __ovld __cnfn convert_ushort8_rtn(char8)
float __ovld __cnfn half_powr(float x, float y)
Compute x to the power y, where x is >= 0.
char3 __ovld __cnfn convert_char3_sat_rtn(char3)
short3 __ovld __cnfn convert_short3_rtp(char3)
uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord)
float __ovld __cnfn native_divide(float x, float y)
Compute x / y over an implementation-defined range.
char __ovld __cnfn sub_sat(char x, char y)
Returns x - y and saturates the result.
uint8 __ovld __cnfn convert_uint8_sat_rtp(char8)
int4 __ovld __cnfn convert_int4_rtz(char4)
uchar16 __ovld __cnfn convert_uchar16_sat_rtp(char16)
float4 __ovld __cnfn convert_float4_rtn(char4)
float __ovld __cnfn dot(float p0, float p1)
Compute dot product.
char __ovld __cnfn convert_char_sat_rtp(char)
uint8 __ovld __cnfn convert_uint8_rtz(char8)
long __ovld __cnfn convert_long_sat(char)
ushort2 __ovld __cnfn convert_ushort2_rtz(char2)
int __ovld __cnfn convert_int_rtn(char)
int kernel_enqueue_flags_t
long8 __ovld __cnfn convert_long8_sat_rtz(char8)
float2 __ovld __cnfn convert_float2_rte(char2)
long3 __ovld __cnfn convert_long3_sat_rte(char3)
long4 __ovld __cnfn convert_long4(char4)
int4 __ovld __cnfn convert_int4_sat_rte(char4)
ushort __ovld __cnfn convert_ushort_rtz(char)
short8 __ovld __cnfn convert_short8_sat_rte(char8)
float __ovld __cnfn fdim(float x, float y)
x - y if x > y, +0 if x is less than or equal to y.
int __ovld __cnfn convert_int_sat_rtz(char)
ulong3 __ovld __cnfn convert_ulong3_rtn(char3)
ndrange_t __ovld ndrange_3D(const size_t[3])
int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order)
long8 __ovld __cnfn convert_long8_rte(char8)
size_t __ovld __cnfn get_local_size(uint dimindx)
Returns the number of local work-items specified in dimension identified by dimindx.
ushort8 __ovld __cnfn convert_ushort8_rtz(char8)
ulong3 __ovld __cnfn convert_ulong3_sat_rte(char3)
ushort16 __ovld __cnfn convert_ushort16_rtp(char16)
char8 __ovld __cnfn convert_char8_sat_rtz(char8)
bool __ovld is_valid_reserve_id(reserve_id_t reserve_id)
int printf(__constant const char *st,...)
long4 __ovld __cnfn convert_long4_rtp(char4)
ulong2 __ovld __cnfn convert_ulong2_sat(char2)
float __ovld __cnfn half_log2(float x)
Compute a base 2 logarithm.
char char2 __attribute__((ext_vector_type(2)))
float __ovld __cnfn sign(float x)
Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x = +0.0, or -1.0 if x < 0.
int2 __ovld __cnfn convert_int2_sat_rtn(char2)
long16 __ovld __cnfn convert_long16_rtz(char16)
float __ovld __cnfn convert_float_rtp(char)
void __ovld vstore_half2(float2 data, size_t offset, half *p)
The floatn value given by data is converted to a halfn value using the appropriate rounding mode...
float __ovld __cnfn fma(float a, float b, float c)
Returns the correctly rounded floating-point representation of the sum of c with the infinitely precise product of a and b. Rounding of intermediate products shall not occur. Edge case behavior is per the IEEE 754-2008 standard.
ulong4 __ovld __cnfn convert_ulong4_rte(char4)
float __ovld __cnfn fast_distance(float p0, float p1)
Returns fast_length(p0 - p1).
int __ovld __cnfn convert_int_sat(char)
uchar8 __ovld __cnfn convert_uchar8_rtp(char8)
short2 __ovld __cnfn convert_short2_sat_rtn(char2)
short8 __ovld __cnfn convert_short8_sat(char8)
int8 __ovld __cnfn convert_int8_sat_rte(char8)
ushort8 __ovld __cnfn convert_ushort8_sat_rtz(char8)
int16 __ovld __cnfn convert_int16_sat_rte(char16)
short3 __ovld __cnfn convert_short3_rtz(char3)
float __ovld __cnfn half_tan(float x)
Compute tangent.
uint4 __ovld __cnfn convert_uint4_rtn(char4)
event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event)
Perform an async gather of num_elements gentype elements from src to dst.
short __ovld __cnfn convert_short_rte(char)
short2 __ovld __cnfn convert_short2_sat_rtp(char2)
long16 __ovld __cnfn convert_long16_sat_rtn(char16)
ulong8 __ovld __cnfn convert_ulong8(char8)
void __ovld vstore_half4_rte(float4 data, size_t offset, half *p)
char2 __ovld __cnfn convert_char2_sat_rtz(char2)
short3 __ovld __cnfn convert_short3_sat_rtz(char3)
ulong4 __ovld __cnfn convert_ulong4_rtn(char4)
char __ovld __cnfn convert_char_rtz(char)
short4 __ovld __cnfn convert_short4_sat(char4)
short4 __ovld __cnfn convert_short4_sat_rte(char4)
ulong2 __ovld __cnfn convert_ulong2_sat_rtp(char2)
float __ovld __cnfn step(float edge, float x)
Returns 0.0 if x < edge, otherwise it returns 1.0.
ulong8 __ovld __cnfn convert_ulong8_sat_rtp(char8)
uchar __ovld __cnfn convert_uchar_sat(char)
float __ovld __cnfn exp2(float)
Exponential base 2 function.
float __ovld __cnfn cosh(float)
Compute hyperbolic cosine.
char __ovld __cnfn convert_char_sat(char)
ushort4 __ovld __cnfn convert_ushort4_rtp(char4)
float16 __ovld vloada_half16(size_t offset, const __constant half *p)
ushort3 __ovld __cnfn convert_ushort3_sat_rtz(char3)
float16 __ovld __cnfn convert_float16(char16)
void __ovld atomic_store(volatile atomic_int *object, int desired)
int3 __ovld __cnfn convert_int3_rtn(char3)
short8 __ovld __cnfn convert_short8_sat_rtn(char8)
float __ovld __cnfn floor(float)
Round to integral value using the round to -ve infinity rounding mode.
uint2 __ovld __cnfn convert_uint2_sat_rte(char2)
void __ovld vstorea_half8(float8 data, size_t offset, half *p)
long8 __ovld __cnfn convert_long8_sat_rtp(char8)
long2 __ovld __cnfn convert_long2_rtn(char2)
int16 __ovld __cnfn convert_int16(char16)
ushort2 __ovld __cnfn convert_ushort2_sat_rtz(char2)
ushort4 __ovld __cnfn convert_ushort4_sat_rte(char4)
ushort3 __ovld __cnfn convert_ushort3_rtp(char3)
void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p)
long3 __ovld __cnfn convert_long3_sat(char3)
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p)
short8 __ovld __cnfn convert_short8_rtz(char8)
float __ovld __cnfn lgamma(float x)
Log gamma function.
void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p)
uchar4 __ovld __cnfn convert_uchar4_sat_rtp(char4)
uchar2 __ovld __cnfn convert_uchar2_rtn(char2)
ushort __ovld __cnfn convert_ushort_sat_rtz(char)
int __ovld __cnfn convert_int_sat_rte(char)
short2 __ovld __cnfn convert_short2_rtn(char2)
float2 __ovld __cnfn convert_float2_rtz(char2)
short2 __ovld __cnfn convert_short2_sat_rte(char2)
uchar __ovld __cnfn convert_uchar_sat_rtn(char)
float __ovld __cnfn atan2pi(float y, float x)
Compute atan2 (y, x) / PI.
uint16 __ovld __cnfn convert_uint16_sat_rtn(char16)
void __ovld vstorea_half(float data, size_t offset, half *p)
The floatn value given by data is converted to a halfn value using the appropriate rounding mode...
int4 __ovld __cnfn convert_int4_rtn(char4)
int __ovld __cnfn convert_int_sat_rtp(char)
uint16 __ovld __cnfn convert_uint16_sat_rtz(char16)
short __ovld __cnfn convert_short(char)
float __ovld lgamma_r(float x, int *signp)
ulong2 __ovld __cnfn convert_ulong2_sat_rtn(char2)
ushort8 __ovld __cnfn convert_ushort8_sat_rtp(char8)
typedef unsigned char uchar;
An unsigned 8-bit integer.
int8 __ovld __cnfn convert_int8_sat_rtz(char8)
uchar16 __ovld __cnfn convert_uchar16_sat(char16)
ulong3 __ovld __cnfn convert_ulong3_sat_rtn(char3)
void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p)
float __ovld __cnfn atanpi(float x)
Compute atan (x) / PI.
void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p)
ulong __ovld __cnfn convert_ulong_rte(char)
void __ovld retain_event(clk_event_t)
short16 __ovld __cnfn convert_short16_sat_rte(char16)
char4 __ovld __cnfn convert_char4_rtp(char4)
int16 __ovld __cnfn convert_int16_rtn(char16)
short16 __ovld __cnfn convert_short16_sat(char16)
void __ovld vstore_half2_rte(float2 data, size_t offset, half *p)
int3 __ovld __cnfn convert_int3_sat_rtp(char3)
void __ovld vstorea_half_rtn(float data, size_t offset, half *p)
int __ovld atomic_dec(volatile __global int *p)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute (old - 1) and store result at location pointed by p. The function returns old.
char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask)
short4 __ovld __cnfn convert_short4_sat_rtz(char4)
float __ovld vloada_half(size_t offset, const __constant half *p)
For n = 1, 2, 4, 8 and 16 read sizeof (halfn) bytes of data from address (p + (offset * n))...
int __ovld __cnfn isgreaterequal(float x, float y)
Returns the component-wise compare of x >= y.
int __ovld enqueue_marker(queue_t, uint, const __private clk_event_t *, __private clk_event_t *)
void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p)
uchar8 __ovld __cnfn convert_uchar8_sat_rtn(char8)
long3 __ovld __cnfn convert_long3_rtz(char3)
short4 __ovld __cnfn convert_short4_rtz(char4)
float4 __ovld vload_half4(size_t offset, const __constant half *p)
uint8 __ovld __cnfn convert_uint8(char8)
uchar4 __ovld __cnfn convert_uchar4(char4)
ulong8 __ovld __cnfn convert_ulong8_sat_rtn(char8)
float3 __ovld __cnfn convert_float3_rtp(char3)
ulong16 __ovld __cnfn convert_ulong16_sat_rtz(char16)
short __ovld __cnfn convert_short_sat_rte(char)
uint3 __ovld __cnfn convert_uint3_rtp(char3)
uchar2 __ovld __cnfn convert_uchar2_rtz(char2)
long16 __ovld __cnfn convert_long16(char16)
char __ovld __cnfn add_sat(char x, char y)
Returns x + y and saturates the result.
float __ovld __cnfn fast_length(float p)
Returns the length of vector p computed as: half_sqrt(p.x^2 + p.y^2 + ...)
long4 __ovld __cnfn convert_long4_rtn(char4)
int3 __ovld __cnfn convert_int3_sat_rtn(char3)
char __ovld __cnfn rhadd(char x, char y)
Returns (x + y + 1) >> 1.
void __ovld vstorea_half2(float2 data, size_t offset, half *p)
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected, int desired, memory_order success, memory_order failure)
short2 __ovld __cnfn convert_short2_rtz(char2)
short4 __ovld __cnfn convert_short4_sat_rtn(char4)
char3 __ovld __cnfn convert_char3_rtn(char3)
int __ovld __cnfn isordered(float x, float y)
Test if arguments are ordered.
float __ovld __cnfn cos(float)
Compute cosine.
short2 __ovld __cnfn convert_short2_rte(char2)
long __ovld __cnfn convert_long_rte(char)
short4 __ovld __cnfn convert_short4_sat_rtp(char4)
int __ovld __cnfn isnan(float)
Test for a NaN.
int __ovld __cnfn get_image_channel_order(read_only image1d_t image)
void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p)
long2 __ovld __cnfn convert_long2(char2)
void __ovld atomic_flag_clear(volatile atomic_flag *object)
int __ovld __cnfn isinf(float)
Test for infinity value (+ve or -ve) .
void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p)
uint __ovld __cnfn convert_uint_sat_rtp(char)
ushort __ovld __cnfn convert_ushort(char)
char8 __ovld __cnfn convert_char8_sat_rtp(char8)
int4 __ovld __cnfn convert_int4_rte(char4)
float8 __ovld __cnfn convert_float8_rtp(char8)
float __ovld __cnfn half_log(float x)
Compute natural logarithm.
ushort __ovld __cnfn convert_ushort_sat(char)
ulong8 __ovld __cnfn convert_ulong8_sat_rte(char8)
void __ovld vstore_half(float data, size_t offset, half *p)
The float value given by data is first converted to a half value using the appropriate rounding mode. The half value is then written to address computed as (p + offset). The address computed as (p + offset) must be 16-bit aligned.
int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order)
int __ovld atomic_load(volatile atomic_int *object)
long3 __ovld __cnfn convert_long3_sat_rtn(char3)
ushort4 __ovld __cnfn convert_ushort4_rtz(char4)
char2 __ovld __cnfn convert_char2_sat_rtp(char2)
float __ovld __cnfn half_exp(float x)
Compute the base- e exponential of x.
int __ovld __cnfn isnormal(float)
Test for a normal value.
void __ovld vstore_half8_rte(float8 data, size_t offset, half *p)
long4 __ovld __cnfn convert_long4_sat_rte(char4)
ushort8 __ovld __cnfn convert_ushort8_rte(char8)
ushort8 __ovld __cnfn convert_ushort8_sat_rtn(char8)
int __ovld atomic_fetch_and(volatile atomic_int *object, int operand)
float __ovld __cnfn sinpi(float x)
Compute sin (PI * x).
int16 __ovld __cnfn convert_int16_sat_rtn(char16)
typedef unsigned long ulong;
An unsigned 64-bit integer.
float __ovld __cnfn atanh(float)
Hyperbolic arc tangent.
void __ovld wait_group_events(int num_events, event_t *event_list)
Wait for events that identify the async_work_group_copy operations to complete.
size_t __ovld get_local_linear_id(void)
short4 __ovld __cnfn convert_short4_rte(char4)
char4 __ovld __cnfn convert_char4_sat_rte(char4)
int __ovld atomic_sub(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute (old - val) and store result at location pointed by p. The function returns old.
float __ovld __cnfn half_recip(float x)
Compute reciprocal.
ushort2 __ovld __cnfn convert_ushort2_sat(char2)
ulong __ovld __cnfn convert_ulong(char)
float __ovld __cnfn pow(float x, float y)
Compute x to the power y.
int8 __ovld __cnfn convert_int8_sat_rtn(char8)
void __ovld release_event(clk_event_t)
char __ovld __cnfn convert_char_rtn(char)
ushort16 __ovld __cnfn convert_ushort16_sat_rtz(char16)
int __ovld __cnfn convert_int_rtp(char)
float16 __ovld __cnfn convert_float16_rte(char16)
int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order)
void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p)
short __ovld __cnfn convert_short_sat_rtp(char)
float8 __ovld __cnfn convert_float8_rtn(char8)
void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p)
uchar16 __ovld __cnfn convert_uchar16_sat_rtz(char16)
int __ovld atomic_max(volatile __global int *p, int val)
Read the 32-bit value (referred to as old) stored at location pointed by p. Compute max(old, val) and store result at location pointed by p. The function returns old.
uint8 __ovld __cnfn convert_uint8_sat_rtn(char8)
float3 __ovld vloada_half3(size_t offset, const __constant half *p)
float __ovld __cnfn convert_float(char)
char2 __ovld __cnfn convert_char2_sat_rtn(char2)