/*
 * Copyright 1993-2017 NVIDIA Corporation.  All rights reserved.
 *
 * NOTICE TO LICENSEE:
 *
 * This source code and/or documentation ("Licensed Deliverables") are
 * subject to NVIDIA intellectual property rights under U.S. and
 * international Copyright laws.
 *
 * These Licensed Deliverables contained herein is PROPRIETARY and
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and
 * conditions of a form of NVIDIA software license agreement by and
 * between NVIDIA and Licensee ("License Agreement") or electronically
 * accepted by Licensee.  Notwithstanding any terms or conditions to
 * the contrary in the License Agreement, reproduction or disclosure
 * of the Licensed Deliverables to any third party without the express
 * written consent of NVIDIA is prohibited.
 *
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
 * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS
 * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
 * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
 * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
 * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
 * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
 * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
 * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THESE LICENSED DELIVERABLES.
 *
 * U.S. Government End Users.  These Licensed Deliverables are a
 * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
 * 1995), consisting of "commercial computer software" and "commercial
 * computer software documentation" as such terms are used in 48
 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
 * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
 * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
 * U.S. Government End Users acquire the Licensed Deliverables with
 * only those rights set forth herein.
 *
 * Any use of the Licensed Deliverables in individual and commercial
 * software must include, in the user documentation and internal
 * comments to the code, the above Disclaimer and U.S. Government End
 * Users Notice.
 */

#if !defined(__TEXTURE_FETCH_FUNCTIONS_H__)
#define __TEXTURE_FETCH_FUNCTIONS_H__

#if defined(__cplusplus) && defined(__CUDACC__)

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#include "cuda_runtime_api.h"
#include "cuda_texture_types.h"

/* Every texture-reference fetch function in this header is deprecated in
   favor of the texture-object API (cudaTextureObject_t / tex1Dfetch<T> etc.).
   NOTE(review): the two tex2Dgather overloads below carry no __DEPRECATED__
   marker in the original text; that asymmetry is preserved. */
#if defined(_WIN32)
# define __DEPRECATED__ __declspec(deprecated)
#else
# define __DEPRECATED__ __attribute__((deprecated))
#endif

/* Maps a texel type T to the value type returned by a
   cudaReadModeElementType fetch.  Identity for every supported texel type;
   unspecialized (empty) primary template makes unsupported types a
   compile-time error.  The long/ulong variants exist only on ILP32 targets
   where long is 32-bit. */
template <typename T> struct __nv_tex_rmet_ret { };

template<> struct __nv_tex_rmet_ret<char> { typedef char type; };
template<> struct __nv_tex_rmet_ret<signed char> { typedef signed char type; };
template<> struct __nv_tex_rmet_ret<unsigned char> { typedef unsigned char type; };
template<> struct __nv_tex_rmet_ret<char1> { typedef char1 type; };
template<> struct __nv_tex_rmet_ret<uchar1> { typedef uchar1 type; };
template<> struct __nv_tex_rmet_ret<char2> { typedef char2 type; };
template<> struct __nv_tex_rmet_ret<uchar2> { typedef uchar2 type; };
template<> struct __nv_tex_rmet_ret<char4> { typedef char4 type; };
template<> struct __nv_tex_rmet_ret<uchar4> { typedef uchar4 type; };
template<> struct __nv_tex_rmet_ret<short> { typedef short type; };
template<> struct __nv_tex_rmet_ret<unsigned short> { typedef unsigned short type; };
template<> struct __nv_tex_rmet_ret<short1> { typedef short1 type; };
template<> struct __nv_tex_rmet_ret<ushort1> { typedef ushort1 type; };
template<> struct __nv_tex_rmet_ret<short2> { typedef short2 type; };
template<> struct __nv_tex_rmet_ret<ushort2> { typedef ushort2 type; };
template<> struct __nv_tex_rmet_ret<short4> { typedef short4 type; };
template<> struct __nv_tex_rmet_ret<ushort4> { typedef ushort4 type; };
template<> struct __nv_tex_rmet_ret<int> { typedef int type; };
template<> struct __nv_tex_rmet_ret<unsigned int> { typedef unsigned int type; };
template<> struct __nv_tex_rmet_ret<int1> { typedef int1 type; };
template<> struct __nv_tex_rmet_ret<uint1> { typedef uint1 type; };
template<> struct __nv_tex_rmet_ret<int2> { typedef int2 type; };
template<> struct __nv_tex_rmet_ret<uint2> { typedef uint2 type; };
template<> struct __nv_tex_rmet_ret<int4> { typedef int4 type; };
template<> struct __nv_tex_rmet_ret<uint4> { typedef uint4 type; };
#if !defined(__LP64__)
template<> struct __nv_tex_rmet_ret<long> { typedef long type; };
template<> struct __nv_tex_rmet_ret<unsigned long> { typedef unsigned long type; };
template<> struct __nv_tex_rmet_ret<long1> { typedef long1 type; };
template<> struct __nv_tex_rmet_ret<ulong1> { typedef ulong1 type; };
template<> struct __nv_tex_rmet_ret<long2> { typedef long2 type; };
template<> struct __nv_tex_rmet_ret<ulong2> { typedef ulong2 type; };
template<> struct __nv_tex_rmet_ret<long4> { typedef long4 type; };
template<> struct __nv_tex_rmet_ret<ulong4> { typedef ulong4 type; };
#endif /* !__LP64__ */
template<> struct __nv_tex_rmet_ret<float> { typedef float type; };
template<> struct __nv_tex_rmet_ret<float1> { typedef float1 type; };
template<> struct __nv_tex_rmet_ret<float2> { typedef float2 type; };
template<> struct __nv_tex_rmet_ret<float4> { typedef float4 type; };

/* Pointer type that the result buffer is cast to before being handed to
   __nv_tex_surf_handler.  T* in general; on ILP32 the 32-bit long variants
   are presented to the handler as the equally-sized int variants. */
template <typename T> struct __nv_tex_rmet_cast { typedef T* type; };
#if !defined(__LP64__)
template<> struct __nv_tex_rmet_cast<long> { typedef int *type; };
template<> struct __nv_tex_rmet_cast<unsigned long> { typedef unsigned int *type; };
template<> struct __nv_tex_rmet_cast<long1> { typedef int1 *type; };
template<> struct __nv_tex_rmet_cast<ulong1> { typedef uint1 *type; };
template<> struct __nv_tex_rmet_cast<long2> { typedef int2 *type; };
template<> struct __nv_tex_rmet_cast<ulong2> { typedef uint2 *type; };
template<> struct __nv_tex_rmet_cast<long4> { typedef int4 *type; };
template<> struct __nv_tex_rmet_cast<ulong4> { typedef uint4 *type; };
#endif /* !__LP64__ */

/* tex1Dfetch: integer-indexed fetch from a 1D texture bound to linear
   memory, element read mode.  Device-only body: the host-side instantiation
   deliberately has no return statement and must never be called. */
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex1Dfetch(texture<T, cudaTextureType1D, cudaReadModeElementType> t, int x)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex1Dfetch_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x);
  return temp;
#endif
}

/* Maps an 8/16-bit texel type T to the float vector type returned by a
   cudaReadModeNormalizedFloat fetch (same component count, float
   components). */
template <typename T> struct __nv_tex_rmnf_ret { };
template <> struct __nv_tex_rmnf_ret<char> { typedef float type; };
template <> struct __nv_tex_rmnf_ret<signed char> { typedef float type; };
template <> struct __nv_tex_rmnf_ret<unsigned char> { typedef float type; };
template <> struct __nv_tex_rmnf_ret<short> { typedef float type; };
template <> struct __nv_tex_rmnf_ret<unsigned short> { typedef float type; };
template <> struct __nv_tex_rmnf_ret<char1> { typedef float1 type; };
template <> struct __nv_tex_rmnf_ret<uchar1> { typedef float1 type; };
template <> struct __nv_tex_rmnf_ret<short1> { typedef float1 type; };
template <> struct __nv_tex_rmnf_ret<ushort1> { typedef float1 type; };
template <> struct __nv_tex_rmnf_ret<char2> { typedef float2 type; };
template <> struct __nv_tex_rmnf_ret<uchar2> { typedef float2 type; };
template <> struct __nv_tex_rmnf_ret<short2> { typedef float2 type; };
template <> struct __nv_tex_rmnf_ret<ushort2> { typedef float2 type; };
template <> struct __nv_tex_rmnf_ret<char4> { typedef float4 type; };
template <> struct __nv_tex_rmnf_ret<uchar4> { typedef float4 type; };
template <> struct __nv_tex_rmnf_ret<short4> { typedef float4 type; };
template <> struct __nv_tex_rmnf_ret<ushort4> { typedef float4 type; };

/* tex1Dfetch, normalized-float read mode.  type_dummy carries T to the
   handler so it can select the correct conversion. */
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex1Dfetch(texture<T, cudaTextureType1D, cudaReadModeNormalizedFloat> t, int x)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex1Dfetch_rmnf_v2", &type_dummy, &retval, t, x);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex1D
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex1D(texture<T, cudaTextureType1D, cudaReadModeElementType> t, float x)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex1D_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex1D(texture<T, cudaTextureType1D, cudaReadModeNormalizedFloat> t, float x)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex1D_rmnf_v2", &type_dummy, &retval, t, x);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex2D
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex2D(texture<T, cudaTextureType2D, cudaReadModeElementType> t, float x, float y)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex2D_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex2D(texture<T, cudaTextureType2D, cudaReadModeNormalizedFloat> t, float x, float y)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex2D_rmnf_v2", &type_dummy, &retval, t, x, y);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex1DLayered
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex1DLayered(texture<T, cudaTextureType1DLayered, cudaReadModeElementType> t, float x, int layer)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex1DLayered_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, layer);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex1DLayered(texture<T, cudaTextureType1DLayered, cudaReadModeNormalizedFloat> t, float x, int layer)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex1DLayered_rmnf_v2", &type_dummy, &retval, t, x, layer);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex2DLayered
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex2DLayered(texture<T, cudaTextureType2DLayered, cudaReadModeElementType> t, float x, float y, int layer)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex2DLayered_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, layer);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex2DLayered(texture<T, cudaTextureType2DLayered, cudaReadModeNormalizedFloat> t, float x, float y, int layer)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex2DLayered_rmnf_v2", &type_dummy, &retval, t, x, y, layer);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex3D
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex3D(texture<T, cudaTextureType3D, cudaReadModeElementType> t, float x, float y, float z)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex3D_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex3D(texture<T, cudaTextureType3D, cudaReadModeNormalizedFloat> t, float x, float y, float z)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex3D_rmnf_v2", &type_dummy, &retval, t, x, y, z);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// texCubemap
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type texCubemap(texture<T, cudaTextureTypeCubemap, cudaReadModeElementType> t, float x, float y, float z)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__texCubemap_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type texCubemap(texture<T, cudaTextureTypeCubemap, cudaReadModeNormalizedFloat> t, float x, float y, float z)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__texCubemap_rmnf_v2", &type_dummy, &retval, t, x, y, z);
  return retval;
#endif /* __CUDA_ARCH__ */
}

/* Maps a texel type to the 4-component vector returned by tex2Dgather in
   element read mode (one component gathered from each of 4 texels). */
template <typename T> struct __nv_tex2dgather_ret { };
template <> struct __nv_tex2dgather_ret<char> { typedef char4 type; };
template <> struct __nv_tex2dgather_ret<signed char> { typedef char4 type; };
template <> struct __nv_tex2dgather_ret<char1> { typedef char4 type; };
template <> struct __nv_tex2dgather_ret<char2> { typedef char4 type; };
template <> struct __nv_tex2dgather_ret<char3> { typedef char4 type; };
template <> struct __nv_tex2dgather_ret<char4> { typedef char4 type; };
template <> struct __nv_tex2dgather_ret<unsigned char> { typedef uchar4 type; };
template <> struct __nv_tex2dgather_ret<uchar1> { typedef uchar4 type; };
template <> struct __nv_tex2dgather_ret<uchar2> { typedef uchar4 type; };
template <> struct __nv_tex2dgather_ret<uchar3> { typedef uchar4 type; };
template <> struct __nv_tex2dgather_ret<uchar4> { typedef uchar4 type; };
template <> struct __nv_tex2dgather_ret<short> { typedef short4 type; };
template <> struct __nv_tex2dgather_ret<short1> { typedef short4 type; };
template <> struct __nv_tex2dgather_ret<short2> { typedef short4 type; };
template <> struct __nv_tex2dgather_ret<short3> { typedef short4 type; };
template <> struct __nv_tex2dgather_ret<short4> { typedef short4 type; };
template <> struct __nv_tex2dgather_ret<unsigned short> { typedef ushort4 type; };
template <> struct __nv_tex2dgather_ret<ushort1> { typedef ushort4 type; };
template <> struct __nv_tex2dgather_ret<ushort2> { typedef ushort4 type; };
template <> struct __nv_tex2dgather_ret<ushort3> { typedef ushort4 type; };
template <> struct __nv_tex2dgather_ret<ushort4> { typedef ushort4 type; };
template <> struct __nv_tex2dgather_ret<int> { typedef int4 type; };
template <> struct __nv_tex2dgather_ret<int1> { typedef int4 type; };
template <> struct __nv_tex2dgather_ret<int2> { typedef int4 type; };
template <> struct __nv_tex2dgather_ret<int3> { typedef int4 type; };
template <> struct __nv_tex2dgather_ret<int4> { typedef int4 type; };
template <> struct __nv_tex2dgather_ret<unsigned int> { typedef uint4 type; };
template <> struct __nv_tex2dgather_ret<uint1> { typedef uint4 type; };
template <> struct __nv_tex2dgather_ret<uint2> { typedef uint4 type; };
template <> struct __nv_tex2dgather_ret<uint3> { typedef uint4 type; };
template <> struct __nv_tex2dgather_ret<uint4> { typedef uint4 type; };
template <> struct __nv_tex2dgather_ret<float> { typedef float4 type; };
template <> struct __nv_tex2dgather_ret<float1> { typedef float4 type; };
template <> struct __nv_tex2dgather_ret<float2> { typedef float4 type; };
template <> struct __nv_tex2dgather_ret<float3> { typedef float4 type; };
template <> struct __nv_tex2dgather_ret<float4> { typedef float4 type; };

/* tex2Dgather: gathers component `comp` (0..3) of the 4 texels used for
   bilinear filtering at (x, y). */
template <typename T>
static __device__ __forceinline__ typename __nv_tex2dgather_ret<T>::type tex2Dgather(texture<T, cudaTextureType2D, cudaReadModeElementType> t, float x, float y, int comp=0)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex2dgather_ret<T>::type retval;
  __nv_tex_surf_handler("__tex2Dgather_v2", &type_dummy, &retval, t, x, y, comp);
  return retval;
#endif /* __CUDA_ARCH__ */
}

/* Gather return type in normalized-float read mode: always float4, defined
   only for the 8/16-bit texel types that support normalization. */
template <typename T> struct __nv_tex2dgather_rmnf_ret { };
template<> struct __nv_tex2dgather_rmnf_ret<char> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<signed char> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<unsigned char> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<char1> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<uchar1> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<char2> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<uchar2> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<char3> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<uchar3> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<char4> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<uchar4> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<short> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<unsigned short> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<short1> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<ushort1> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<short2> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<ushort2> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<short3> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<ushort3> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<short4> { typedef float4 type; };
template<> struct __nv_tex2dgather_rmnf_ret<ushort4> { typedef float4 type; };

template <typename T>
static __device__ __forceinline__ typename __nv_tex2dgather_rmnf_ret<T>::type tex2Dgather(texture<T, cudaTextureType2D, cudaReadModeNormalizedFloat> t, float x, float y, int comp = 0)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex2dgather_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex2Dgather_rmnf_v2", &type_dummy, &retval, t, x, y, comp);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex1DLod
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex1DLod(texture<T, cudaTextureType1D, cudaReadModeElementType> t, float x, float level)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex1DLod_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, level);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex1DLod(texture<T, cudaTextureType1D, cudaReadModeNormalizedFloat> t, float x, float level)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex1DLod_rmnf_v2", &type_dummy, &retval, t, x, level);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex2DLod
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex2DLod(texture<T, cudaTextureType2D, cudaReadModeElementType> t, float x, float y, float level)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex2DLod_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, level);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex2DLod(texture<T, cudaTextureType2D, cudaReadModeNormalizedFloat> t, float x, float y, float level)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex2DLod_rmnf_v2", &type_dummy, &retval, t, x, y, level);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex1DLayeredLod
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex1DLayeredLod(texture<T, cudaTextureType1DLayered, cudaReadModeElementType> t, float x, int layer, float level)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex1DLayeredLod_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, layer, level);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex1DLayeredLod(texture<T, cudaTextureType1DLayered, cudaReadModeNormalizedFloat> t, float x, int layer, float level)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex1DLayeredLod_rmnf_v2", &type_dummy, &retval, t, x, layer, level);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex2DLayeredLod
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex2DLayeredLod(texture<T, cudaTextureType2DLayered, cudaReadModeElementType> t, float x, float y, int layer, float level)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex2DLayeredLod_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, layer, level);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex2DLayeredLod(texture<T, cudaTextureType2DLayered, cudaReadModeNormalizedFloat> t, float x, float y, int layer, float level)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex2DLayeredLod_rmnf_v2", &type_dummy, &retval, t, x, y, layer, level);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex3DLod
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex3DLod(texture<T, cudaTextureType3D, cudaReadModeElementType> t, float x, float y, float z, float level)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex3DLod_v2",(typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z, level);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex3DLod(texture<T, cudaTextureType3D, cudaReadModeNormalizedFloat> t, float x, float y, float z, float level)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex3DLod_rmnf_v2", &type_dummy, &retval, t, x, y, z, level);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// texCubemapLod
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type texCubemapLod(texture<T, cudaTextureTypeCubemap, cudaReadModeElementType> t, float x, float y, float z, float level)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__texCubemapLod_v2",(typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z, level);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type texCubemapLod(texture<T, cudaTextureTypeCubemap, cudaReadModeNormalizedFloat> t, float x, float y, float z, float level)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__texCubemapLod_rmnf_v2", &type_dummy, &retval, t, x, y, z, level);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// texCubemapLayered
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type texCubemapLayered(texture<T, cudaTextureTypeCubemapLayered, cudaReadModeElementType> t, float x, float y, float z, int layer)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__texCubemapLayered_v2",(typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z, layer);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type texCubemapLayered(texture<T, cudaTextureTypeCubemapLayered, cudaReadModeNormalizedFloat> t, float x, float y, float z, int layer)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__texCubemapLayered_rmnf_v2", &type_dummy, &retval, t, x, y, z, layer);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// texCubemapLayeredLod
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type texCubemapLayeredLod(texture<T, cudaTextureTypeCubemapLayered, cudaReadModeElementType> t, float x, float y, float z, int layer, float level)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__texCubemapLayeredLod_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z, layer, level);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type texCubemapLayeredLod(texture<T, cudaTextureTypeCubemapLayered, cudaReadModeNormalizedFloat> t, float x, float y, float z, int layer, float level)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__texCubemapLayeredLod_rmnf_v2", &type_dummy, &retval, t, x, y, z, layer, level);
  return retval;
#endif /* __CUDA_ARCH__ */
}

/* texCubemapGrad: cubemap fetch with explicit 4-component derivatives,
   passed by address to the handler. */
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type texCubemapGrad(texture<T, cudaTextureTypeCubemap, cudaReadModeElementType> t, float x, float y, float z, float4 dPdx, float4 dPdy)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__texCubemapGrad_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z, &dPdx, &dPdy);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type texCubemapGrad(texture<T, cudaTextureTypeCubemap, cudaReadModeNormalizedFloat> t, float x, float y, float z, float4 dPdx, float4 dPdy)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__texCubemapGrad_rmnf_v2", &type_dummy, &retval, t, x, y, z, &dPdx, &dPdy);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// texCubemapLayeredGrad
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type texCubemapLayeredGrad(texture<T, cudaTextureTypeCubemapLayered, cudaReadModeElementType> t, float x, float y, float z, int layer, float4 dPdx, float4 dPdy)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__texCubemapLayeredGrad_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z, layer, &dPdx, &dPdy);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type texCubemapLayeredGrad(texture<T, cudaTextureTypeCubemapLayered, cudaReadModeNormalizedFloat> t, float x, float y, float z, int layer, float4 dPdx, float4 dPdy)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__texCubemapLayeredGrad_rmnf_v2", &type_dummy, &retval,t, x, y, z, layer, &dPdx, &dPdy);
  return retval;
#endif /* __CUDA_ARCH__ */
}

/* tex1DGrad: 1D derivatives are scalar floats and are passed by value,
   unlike the 2D/4D gradient variants which pass by address. */
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex1DGrad(texture<T, cudaTextureType1D, cudaReadModeElementType> t, float x, float dPdx, float dPdy)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex1DGrad_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, dPdx, dPdy);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex1DGrad(texture<T, cudaTextureType1D, cudaReadModeNormalizedFloat> t, float x, float dPdx, float dPdy)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex1DGrad_rmnf_v2", &type_dummy, &retval,t, x,dPdx, dPdy);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex2DGrad
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex2DGrad(texture<T, cudaTextureType2D, cudaReadModeElementType> t, float x, float y, float2 dPdx, float2 dPdy)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex2DGrad_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, &dPdx, &dPdy);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex2DGrad(texture<T, cudaTextureType2D, cudaReadModeNormalizedFloat> t, float x, float y, float2 dPdx, float2 dPdy)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex2DGrad_rmnf_v2", &type_dummy, &retval,t, x, y, &dPdx, &dPdy);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex1DLayeredGrad
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex1DLayeredGrad(texture<T, cudaTextureType1DLayered, cudaReadModeElementType> t, float x, int layer, float dPdx, float dPdy)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex1DLayeredGrad_v2",(typename __nv_tex_rmet_cast<T>::type)&temp, t, x, layer, dPdx, dPdy);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex1DLayeredGrad(texture<T, cudaTextureType1DLayered, cudaReadModeNormalizedFloat> t, float x, int layer, float dPdx, float dPdy)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex1DLayeredGrad_rmnf_v2", &type_dummy, &retval,t, x, layer, dPdx, dPdy);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex2DLayeredGrad
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex2DLayeredGrad(texture<T, cudaTextureType2DLayered, cudaReadModeElementType> t, float x, float y, int layer, float2 dPdx, float2 dPdy)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex2DLayeredGrad_v2",(typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, layer, &dPdx, &dPdy);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex2DLayeredGrad(texture<T, cudaTextureType2DLayered, cudaReadModeNormalizedFloat> t, float x, float y, int layer, float2 dPdx, float2 dPdy)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex2DLayeredGrad_rmnf_v2", &type_dummy, &retval,t, x, y, layer, &dPdx, &dPdy);
  return retval;
#endif /* __CUDA_ARCH__ */
}

// tex3DGrad
template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmet_ret<T>::type tex3DGrad(texture<T, cudaTextureType3D, cudaReadModeElementType> t, float x, float y, float z, float4 dPdx, float4 dPdy)
{
#ifdef __CUDA_ARCH__
  typename __nv_tex_rmet_ret<T>::type temp;
  __nv_tex_surf_handler("__tex3DGrad_v2", (typename __nv_tex_rmet_cast<T>::type)&temp, t, x, y, z, &dPdx, &dPdy);
  return temp;
#endif
}

template <typename T>
static __DEPRECATED__ __forceinline__ __device__ typename __nv_tex_rmnf_ret<T>::type tex3DGrad(texture<T, cudaTextureType3D, cudaReadModeNormalizedFloat> t, float x, float y, float z, float4 dPdx, float4 dPdy)
{
#ifdef __CUDA_ARCH__
  T type_dummy;
  typename __nv_tex_rmnf_ret<T>::type retval;
  __nv_tex_surf_handler("__tex3DGrad_rmnf_v2", &type_dummy, &retval,t, x, y, z, &dPdx, &dPdy);
  return retval;
#endif /* __CUDA_ARCH__ */
}

#undef __DEPRECATED__

#endif /* __cplusplus && __CUDACC__ */

#endif /* !__TEXTURE_FETCH_FUNCTIONS_H__ */