/*
* NVIDIA_COPYRIGHT_BEGIN
*
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* NVIDIA_COPYRIGHT_END
*/
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead.")
#else
#warning "crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__
#endif
#ifndef __NV_LIBCXX_FUNCTIONAL_H__
#define __NV_LIBCXX_FUNCTIONAL_H__
#if __cplusplus < 201103L
#if defined(_MSC_VER)
#if _MSC_VER < 1800
#error This library requires Visual Studio 2013 or later
#endif /* _MSC_VER < 1800 */
#else /* !_MSC_VER */
#error This library requires support for the ISO C++ 2011 standard
#endif /* _MSC_VER */
#endif /* __cplusplus */
#if defined(_MSC_VER)
#define __NV_ALIGNOF __alignof
#define __NV_NOEXCEPT
#define __NV_CONSTEXPR
#else /* !_MSC_VER */
#define __NV_ALIGNOF alignof
#define __NV_NOEXCEPT noexcept
#define __NV_CONSTEXPR constexpr
#endif /* _MSC_VER */
#include <type_traits>
#include <cstddef>
#include <new>
// n3290 20.8
namespace nvstd
{
namespace internal {
// D.8.1 base (deprecated) [depr.base]
template <class _Arg, class _Result>
struct unary_function
{
typedef _Arg argument_type;
typedef _Result result_type;
};
template <class _Arg1, class _Arg2, class _Result>
struct binary_function
{
typedef _Arg1 first_argument_type;
typedef _Arg2 second_argument_type;
typedef _Result result_type;
};
// move
template <class _T>
inline __device__ __host__
typename std::remove_reference<_T>::type&& move(_T&& __t) __NV_NOEXCEPT
{
return static_cast<typename std::remove_reference<_T>::type&&>(__t);
}
// 20.2.2 swap [utility.swap]
// swap
template<class _T,
class = typename std::enable_if<
std::is_move_constructible<_T>::value &&
std::is_move_assignable<_T>::value>::type>
inline __device__ __host__
void swap(_T& __a, _T& __b)
#if !defined(_MSC_VER)
noexcept(std::is_nothrow_move_constructible<_T>::value &&
std::is_nothrow_move_assignable<_T>::value)
#endif /* !defined(_MSC_VER) */
{
_T __t(internal::move(__a));
__a = internal::move(__b);
__b = internal::move(__t);
}
// 20.2.3 forward/move helpers [forward]
// forward
template <class _T>
inline __device__ __host__
_T&& forward(typename std::remove_reference<_T>::type& __t) __NV_NOEXCEPT
{
return static_cast<_T&&>(__t);
}
template <class _T>
inline __device__ __host__
_T&& forward(typename std::remove_reference<_T>::type&& __t) __NV_NOEXCEPT
{
static_assert(!std::is_lvalue_reference<_T>::value,
"Error: __t is instantiated with an lvalue reference type");
return static_cast<_T&&>(__t);
}
} // namespace internal
namespace __functional_helpers
{
struct __dummy_class;
// Store small functors locally:
// a functor is eligible for local storage if it is one of the following:
// * a member object pointer;
// * a member function pointer;
// * a closure type whose size is less than or equal to the largest
//   of the above types;
// * a function pointer;
// * any other callable class whose size is less than or equal to
//   the largest of the above types;
// (its alignment must also not exceed that of _Small_functor_types).
union _Small_functor_types
{
void *__obj;
void (*__func_ptr)();
void (__dummy_class::*__mem_fn_ptr)();
};
struct _Small_functor_data {
char __data[sizeof(_Small_functor_types)];
};
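// Illustrative example only (not part of this header), assuming a typical
// 64-bit target where the small buffer is two or three pointers wide:
// a capture-less lambda, or one capturing a single pointer, fits in
// _Small_functor_data and is stored inside the function object itself,
// while a closure capturing four doubles (32 bytes) does not fit and is
// heap-allocated by the wrapper instead:
//
//   __device__ void example()
//   {
//       int *p = 0;
//       nvstd::function<int()> small = [p] { return p ? *p : 0; };  // local buffer
//       double a = 1, b = 2, c = 3, d = 4;
//       nvstd::function<double()> big =
//           [a, b, c, d] { return a + b + c + d; };                 // heap allocated
//   }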
template <class _RetType, class ..._ArgTypes>
struct __maybe_base_function
{ };
template <class _RetType, class _T1>
struct __maybe_base_function<_RetType(_T1)>
: public internal::unary_function<_T1, _RetType>
{ };
template <class _RetType, class _T1, class _T2>
struct __maybe_base_function<_RetType(_T1, _T2)>
: public internal::binary_function<_T1, _T2, _RetType>
{ };
} // namespace __functional_helpers
// 20.8.11 Polymorphic function wrappers [func.wrap]
// 20.8.11.1 Class bad_function_call [func.wrap.badcall]
// not implemented because it relies on exception support
// class bad_function_call : public std::exception
// 20.8.11.2 Class template function [func.wrap.func]
template<class> class function; // undefined
// Simplified version of the class template function, which
// * does not support allocator_arg_t;
// * does not support target() and target_type(), which rely on RTTI;
// * does not throw the bad_function_call exception when an empty target
//   is invoked
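// Illustrative usage only (not part of this header); a minimal sketch,
// assuming client code includes the public <nvfunctional> wrapper:
//
//   #include <nvfunctional>
//
//   __global__ void kernel(int *out)
//   {
//       nvstd::function<int(int)> twice = [](int x) { return x * 2; };
//       twice = [](int x) { return x + x; };   // rebind to another callable
//       *out = twice(21);                      // 42
//   }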
template <class _RetType, class ..._ArgTypes>
class function<_RetType(_ArgTypes...)>
: public __functional_helpers::__maybe_base_function<_RetType(_ArgTypes...)>
{
__functional_helpers::_Small_functor_data __small_functor_data;
void *__obj;
typedef _RetType(*__meta_fn_type)(void *, _ArgTypes...);
__meta_fn_type __meta_fn;
typedef void(*__cloner_type)(function &, const function &);
__cloner_type __cloner;
typedef void(*__destructor_type)(function *);
__destructor_type __destructor;
#pragma nv_exec_check_disable
template <class _F>
__device__ __host__
__NV_CONSTEXPR bool __use_small_functor_data() const
{
return (sizeof(_F) <= sizeof(__small_functor_data) &&
__NV_ALIGNOF(_F) <= __NV_ALIGNOF(
__functional_helpers::_Small_functor_types));
}
#pragma nv_exec_check_disable
__device__ __host__
void* __get_small_functor_data() const
{
return (void*)(&__small_functor_data.__data[0]);
}
#pragma nv_exec_check_disable
__device__ __host__
bool __is_small_functor_data() const
{
return __obj == __get_small_functor_data();
}
#pragma nv_exec_check_disable
template <class _F>
__device__ __host__
static _F& __get_functor(void *__p)
{
return *((_F*)__p);
}
#pragma nv_exec_check_disable
template <class _F>
__device__ __host__
static bool __is_empty_functor(const _F& /*__p*/)
{
return false;
}
#pragma nv_exec_check_disable
template <class _F>
__device__ __host__
static bool __is_empty_functor(const _F* __p)
{
return !__p;
}
#pragma nv_exec_check_disable
template <class _Res, class _C>
__device__ __host__
static bool __is_empty_functor(const _Res _C::* __p)
{
return !__p;
}
#pragma nv_exec_check_disable
template <class _Res, class... _Args>
__device__ __host__
static bool __is_empty_functor(const function<_Res(_Args...)>& __p)
{
return !__p;
}
template <class _F>
struct __make_cloner
{
#pragma nv_exec_check_disable
__device__ __host__
static void __clone_data(function &__dest, const function &__src)
{
if (__dest.__use_small_functor_data<_F>()) {
__dest.__obj = __dest.__get_small_functor_data();
new (__dest.__obj) _F(__src.__get_functor<_F>(__src.__obj));
}
else {
__dest.__obj = new _F(__src.__get_functor<_F>(__src.__obj));
}
}
};
template <class _F>
struct __make_destructor
{
#pragma nv_exec_check_disable
__device__ __host__
static void __destruct(function *__fn)
{
if (__fn->__use_small_functor_data<_F>()) {
(__fn->__get_functor<_F>(__fn->__obj)).~_F();
}
else {
delete (_F*)(__fn->__obj);
}
}
};
// We cannot simply define __make_functor in the following way:
//   template <class _T, class _F>
//   struct __make_functor;
//   template <class _RetType1, class _F, class... _ArgTypes1>
//   struct __make_functor<_RetType1(_ArgTypes1...), _F>;
// because VS 2013 cannot unpack _RetType1(_ArgTypes1...)
template <class _RetType1, class _F, class... _ArgTypes1>
struct __make_functor
{
typedef _RetType1 type;
#pragma nv_exec_check_disable
__device__ __host__
static _RetType1 __invoke(void *__d, _ArgTypes1... __args)
{
return __get_functor<_F>(__d)(
internal::forward<_ArgTypes1>(__args)...);
}
};
template <class _RetType1, class _C, class _M, class... _ArgTypes1>
struct __make_functor<_RetType1, _M _C::*, _ArgTypes1...>
{
typedef _RetType1 type;
typedef _RetType1(*_Fn)(_ArgTypes1...);
#pragma nv_exec_check_disable
__device__ __host__
static _RetType1 __invoke(void *__d, _ArgTypes1... __args)
{
return __get_functor<_Fn>(__d)(
internal::forward<_ArgTypes1>(__args)...);
}
};
// workaround for GCC versions below 4.8
#if (__GNUC__ == 4) && (__GNUC_MINOR__ < 8)
template <class _F>
struct __check_callability
: public std::integral_constant<bool,
!std::is_same<_F, std::nullptr_t>::value>
{ };
#elif defined(_MSC_VER)
// simulate VC 2013's behavior...
template <class _F>
struct __check_callability1
: public
std::integral_constant<bool,
// std::result_of does not handle member pointers well
std::is_member_pointer<_F>::value ||
std::is_convertible<
_RetType,
typename std::result_of<_F(_ArgTypes...)>::type
>::value
>
{ };
template <class _F>
struct __check_callability
: public std::integral_constant<
bool,
!std::is_same<_F, function>::value &&
__check_callability1<typename std::remove_cv<_F>::type>::value>
{ };
#else /* !((__GNUC__ == 4) && (__GNUC_MINOR__ < 8)) _MSC_VER */
template <class _F,
class _T = typename std::result_of<_F(_ArgTypes...)>::type>
struct __check_callability
: public std::integral_constant<
bool,
!std::is_same<_F, function>::value &&
std::is_convertible< _T, _RetType>::value>
{ };
#endif /* __GNUC__ == 4) && (__GNUC_MINOR__ < 8) */
#pragma nv_exec_check_disable
__device__ __host__
void __destroy()
{
if (__obj) {
__destructor(this);
__obj = 0;
}
}
#pragma nv_exec_check_disable
__device__ __host__
void __clear()
{
__obj = 0;
__meta_fn = 0;
__cloner = 0;
__destructor = 0;
}
public:
typedef _RetType result_type;
/*
* These typedef(s) are derived from __maybe_base_function
* typedef T1 argument_type; // only if sizeof...(ArgTypes) == 1 and
* // the type in ArgTypes is T1
* typedef T1 first_argument_type; // only if sizeof...(ArgTypes) == 2 and
* // ArgTypes contains T1 and T2
* typedef T2 second_argument_type; // only if sizeof...(ArgTypes) == 2 and
* // ArgTypes contains T1 and T2
*/
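// For example (illustrative only):
//   nvstd::function<int(float)>::argument_type              is float
//   nvstd::function<int(float, char)>::first_argument_type  is float
//   nvstd::function<int(float, char)>::second_argument_type is char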
// 20.8.11.2.1 construct/copy/destroy [func.wrap.con]
#pragma nv_exec_check_disable
__device__ __host__
function() __NV_NOEXCEPT
: __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {}
#pragma nv_exec_check_disable
__device__ __host__
function(std::nullptr_t) __NV_NOEXCEPT
: __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {}
#pragma nv_exec_check_disable
__device__ __host__
function(const function &__fn)
{
if (__fn.__obj == 0) {
__clear();
}
else {
__meta_fn = __fn.__meta_fn;
__destructor = __fn.__destructor;
__fn.__cloner(*this, __fn);
__cloner = __fn.__cloner;
}
}
#pragma nv_exec_check_disable
__device__ __host__
function(function &&__fn)
// start from the empty state so the swap below does not read
// uninitialized members and leaves __fn empty
: __obj(0), __meta_fn(0), __cloner(0), __destructor(0)
{
__fn.swap(*this);
}
// VS 2013 cannot evaluate the __check_callability type trait in a
// SFINAE context, so callability is checked with a static_assert
// instead of SFINAE such as:
//   template<class _F,
//            class = typename std::enable_if<
//                __check_callability<_F>::value
//            >::type>
#pragma nv_exec_check_disable
template<class _F>
__device__ __host__
function(_F);
// copy and swap
#pragma nv_exec_check_disable
__device__ __host__
function& operator=(const function& __fn)
{
function(__fn).swap(*this);
return *this;
}
#pragma nv_exec_check_disable
__device__ __host__
function& operator=(function&& __fn)
{
function(internal::move(__fn)).swap(*this);
return *this;
}
#pragma nv_exec_check_disable
__device__ __host__
function& operator=(std::nullptr_t)
{
__destroy();
return *this;
}
#pragma nv_exec_check_disable
template<class _F>
__device__ __host__
function&
operator=(_F&& __fn)
{
static_assert(__check_callability<_F>::value,
"Unable to create functor object!");
function(internal::forward<_F>(__fn)).swap(*this);
return *this;
}
#pragma nv_exec_check_disable
__device__ __host__
~function()
{
__destroy();
}
// 20.8.11.2.2 function modifiers [func.wrap.func.mod]
#pragma nv_exec_check_disable
__device__ __host__
void swap(function& __fn) __NV_NOEXCEPT
{
internal::swap(__meta_fn, __fn.__meta_fn);
internal::swap(__cloner, __fn.__cloner);
internal::swap(__destructor, __fn.__destructor);
if (__is_small_functor_data() && __fn.__is_small_functor_data()) {
internal::swap(__small_functor_data, __fn.__small_functor_data);
}
else if (__is_small_functor_data()) {
internal::swap(__small_functor_data, __fn.__small_functor_data);
internal::swap(__obj, __fn.__obj);
__fn.__obj = __fn.__get_small_functor_data();
}
else if (__fn.__is_small_functor_data()) {
internal::swap(__small_functor_data, __fn.__small_functor_data);
internal::swap(__obj, __fn.__obj);
__obj = __get_small_functor_data();
}
else {
internal::swap(__obj, __fn.__obj);
}
}
// 20.8.11.2.3 function capacity [func.wrap.func.cap]
#pragma nv_exec_check_disable
__device__ __host__
explicit operator bool() const __NV_NOEXCEPT
{
return __obj;
}
// 20.8.11.2.4 function invocation [func.wrap.func.inv]
// function::operator() can only be called in device code
// to avoid cross-execution space calls
#pragma nv_exec_check_disable
__device__ __host__
_RetType operator()(_ArgTypes...) const;
};
// Out-of-line definitions
#pragma nv_exec_check_disable
template<class _RetType, class... _ArgTypes>
template<class _F>
__device__ __host__
function<_RetType(_ArgTypes...)>::function(_F __fn)
: __obj(0), __meta_fn(0), __cloner(0), __destructor(0)
{
static_assert(__check_callability<_F>::value,
"Unable to construct functor object!");
if (__is_empty_functor(__fn))
return;
__meta_fn = &__make_functor<_RetType, _F, _ArgTypes...>::__invoke;
__cloner = &__make_cloner<_F>::__clone_data;
__destructor = &__make_destructor<_F>::__destruct;
if (__use_small_functor_data<_F>()) {
__obj = __get_small_functor_data();
new ((void*)__obj) _F(internal::move(__fn));
}
else {
__obj = new _F(internal::move(__fn));
}
}
#pragma nv_exec_check_disable
template <class _RetType, class..._ArgTypes>
__device__ __host__
_RetType
function<_RetType(_ArgTypes...)>::operator()(_ArgTypes... __args) const
{
return __meta_fn(__obj, internal::forward<_ArgTypes>(__args)...);
}
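// Illustrative only (not part of this header): because this wrapper does not
// throw bad_function_call, callers are expected to test for an empty target
// before invoking; a sketch, assuming device code:
//
//   __device__ int call_or_default(const nvstd::function<int(int)> &f, int x)
//   {
//       // invoking an empty nvstd::function is undefined behavior; check first
//       return f ? f(x) : 0;
//   }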
// 20.8.11.2.6, Null pointer comparisons:
#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
bool operator==(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t)
__NV_NOEXCEPT
{
return !__fn;
}
#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
bool operator==(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn)
__NV_NOEXCEPT
{
return !__fn;
}
#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
bool operator!=(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t)
__NV_NOEXCEPT
{
return static_cast<bool>(__fn);
}
#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
bool operator!=(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn)
__NV_NOEXCEPT
{
return static_cast<bool>(__fn);
}
// 20.8.11.2.7, specialized algorithms:
#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
void swap(function<_R(_ArgTypes...)>& __fn1, function<_R(_ArgTypes...)>& __fn2)
{
__fn1.swap(__fn2);
}
} // namespace nvstd
#undef __NV_NOEXCEPT
#undef __NV_CONSTEXPR
#undef __NV_ALIGNOF
#endif // __NV_LIBCXX_FUNCTIONAL_H__
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__
#endif