10#ifndef XTENSOR_UTILS_HPP
11#define XTENSOR_UTILS_HPP
16#include <initializer_list>
24#include <xtl/xfunctional.hpp>
25#include <xtl/xmeta_utils.hpp>
26#include <xtl/xsequence.hpp>
27#include <xtl/xtype_traits.hpp>
29#include "../core/xtensor_config.hpp"
31#if (defined(_MSC_VER) && _MSC_VER >= 1910)
34#define NOEXCEPT(T) noexcept(T)
52 template <std::size_t I,
class... Args>
53 constexpr decltype(
auto) argument(Args&&... args)
noexcept;
55 template <
class R,
class F,
class... S>
56 R apply(std::size_t index, F&& func,
const std::tuple<S...>& s) NOEXCEPT(
noexcept(func(std::get<0>(s))));
58 template <
class T,
class S>
59 void nested_copy(T&& iter,
const S& s);
61 template <
class T,
class S>
62 void nested_copy(T&& iter, std::initializer_list<S> s);
65 bool resize_container(C& c,
typename C::size_type size);
67 template <
class T, std::
size_t N>
68 bool resize_container(std::array<T, N>& a,
typename std::array<T, N>::size_type size);
70 template <std::size_t... I>
73 template <std::size_t... I>
76 template <
class X,
class C>
79 template <
class X,
class C>
82 std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis);
93 using void_t =
typename make_void<T...>::type;
101 template <
class... T>
107 template <
class T,
class R>
108 using disable_integral_t = std::enable_if_t<!xtl::is_integral<T>::value, R>;
124 template <
template <
class...>
class TT,
class T>
129 template <
template <
class...>
class TT,
class... Ts>
143 template <
class C,
class R,
class... Args>
146 typedef R type(Args...);
149 template <
class C,
class R,
class... Args>
152 typedef R type(Args...);
/**
 * Invoke func on every element of the tuple tup, in index order.
 * The noexcept specification propagates: it holds only when every
 * per-element invocation is itself non-throwing.
 */
template <class F, std::size_t... I, class... Ts>
void for_each(F&& func, std::tuple<Ts...>& tup, std::index_sequence<I...>)
    noexcept((noexcept(func(std::get<I>(tup))) && ...))
{
    // Comma fold expands to one call per tuple element, left to right.
    (func(std::get<I>(tup)), ...);
}
/**
 * Const-tuple overload: invoke func on every element of tup, in index
 * order, without permitting mutation of the tuple contents.
 */
template <class F, std::size_t... I, class... Ts>
void for_each(F&& func, const std::tuple<Ts...>& tup, std::index_sequence<I...>)
    noexcept((noexcept(func(std::get<I>(tup))) && ...))
{
    // One invocation per index, expanded left to right via a comma fold.
    (func(std::get<I>(tup)), ...);
}
181 template <
class F,
class... Ts>
182 inline void for_each(F&& f, std::tuple<Ts...>& t)
noexcept(
183 noexcept(detail::for_each(std::forward<F>(f), t, std::make_index_sequence<
sizeof...(Ts)>{}))
186 detail::for_each(std::forward<F>(f), t, std::make_index_sequence<
sizeof...(Ts)>{});
189 template <
class F,
class... Ts>
190 inline void for_each(F&& f,
const std::tuple<Ts...>& t)
noexcept(
191 noexcept(detail::for_each(std::forward<F>(f), t, std::make_index_sequence<
sizeof...(Ts)>{}))
194 detail::for_each(std::forward<F>(f), t, std::make_index_sequence<
sizeof...(Ts)>{});
205 template <
class F,
class R,
class... T,
size_t... I>
206 R accumulate_impl(F&& f, R init,
const std::tuple<T...>& t, std::index_sequence<I...> )
noexcept(
207 (
noexcept(f(init, std::get<I>(t))) && ...)
211 auto wrapper = [&](
const auto& i,
const auto& j)
215 (wrapper(res, std::get<I>(t)), ...);
220 template <
class F,
class R,
class... T>
221 inline R
accumulate(F&& f, R init,
const std::tuple<T...>& t)
noexcept(
222 noexcept(detail::accumulate_impl(std::forward<F>(f), init, t, std::make_index_sequence<
sizeof...(T)>{}))
225 return detail::accumulate_impl(std::forward<F>(f), init, t, std::make_index_sequence<
sizeof...(T)>{});
236 template <std::
size_t I>
239 template <
class Arg,
class... Args>
240 static constexpr decltype(
auto) get(Arg&& , Args&&... args)
noexcept
242 return getter<I - 1>::get(std::forward<Args>(args)...);
249 template <
class Arg,
class... Args>
250 static constexpr Arg&& get(Arg&&
arg, Args&&... ) noexcept
252 return std::forward<Arg>(
arg);
257 template <std::size_t I,
class... Args>
258 constexpr decltype(
auto) argument(Args&&... args)
noexcept
260 static_assert(I <
sizeof...(Args),
"I should be lesser than sizeof...(Args)");
261 return detail::getter<I>::get(std::forward<Args>(args)...);
268 template <
class R,
class F,
class... S>
269 inline R apply(std::size_t index, F&& func,
const std::tuple<S...>& s)
270 NOEXCEPT(
noexcept(func(std::get<0>(s))))
272 XTENSOR_ASSERT(
sizeof...(S) > index);
274 [&](
const S&... args) -> R
276 auto f_impl = [&](
auto&& self,
auto&& i,
auto&& h,
auto&&... t) -> R
280 return static_cast<R
>(func(h));
282 if constexpr (
sizeof...(t) > 0)
284 return self(self, std::size_t{i + 1}, t...);
288 return f_impl(f_impl, std::size_t{0}, args...);
298 template <
class T, std::
size_t I>
310 template <
class T, std::
size_t I>
311 using nested_initializer_list_t =
typename nested_initializer_list<T, I>::type;
317 template <
class T,
class S>
318 inline void nested_copy(T&& iter,
const S& s)
/**
 * Recursively copy a (possibly nested) initializer list into the sequence
 * reached through iter. Each element is dispatched back to nested_copy:
 * another initializer_list level recurses here, while a scalar element
 * selects the assigning overload that advances the iterator.
 */
template <class T, class S>
inline void nested_copy(T&& iter, std::initializer_list<S> s)
{
    for (const auto& element : s)
    {
        nested_copy(std::forward<T>(iter), element);
    }
}
336 inline bool resize_container(C& c,
typename C::size_type size)
342 template <
class T, std::
size_t N>
343 inline bool resize_container(std::array<T, N>& ,
typename std::array<T, N>::size_type size)
348 template <std::size_t... I>
349 inline bool resize_container(xt::fixed_shape<I...>&, std::size_t size)
351 return sizeof...(I) == size;
/**
 * Map a possibly negative axis index onto its non-negative equivalent.
 *
 * @param dim number of dimensions of the expression.
 * @param axis axis index; negative values count back from the end,
 *        mirroring Python-style indexing.
 * @return the normalized, non-negative axis index.
 *
 * NOTE(review): no bounds checking is performed here — an axis outside
 * [-dim, dim) wraps around via unsigned conversion; callers assert ranges.
 */
inline std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis)
{
    if (axis < 0)
    {
        // Do the addition in signed arithmetic, then convert back.
        return static_cast<std::size_t>(static_cast<std::ptrdiff_t>(dim) + axis);
    }
    return static_cast<std::size_t>(axis);
}
365 template <
class E,
class C>
366 inline std::enable_if_t<
367 !xtl::is_integral<std::decay_t<C>>::value && xtl::is_signed<typename std::decay_t<C>::value_type>::value,
368 rebind_container_t<std::size_t, std::decay_t<C>>>
369 normalize_axis(E& expr, C&& axes)
371 rebind_container_t<std::size_t, std::decay_t<C>> res;
372 resize_container(res, axes.size());
374 for (std::size_t i = 0; i < axes.size(); ++i)
376 res[i] = normalize_axis(expr.dimension(), axes[i]);
379 XTENSOR_ASSERT(std::all_of(
384 return ax_el < expr.dimension();
391 template <
class C,
class E>
392 inline std::enable_if_t<
393 !xtl::is_integral<std::decay_t<C>>::value && std::is_unsigned<typename std::decay_t<C>::value_type>::value,
395 normalize_axis(E& expr, C&& axes)
397 static_cast<void>(expr);
398 XTENSOR_ASSERT(std::all_of(
403 return ax_el < expr.dimension();
406 return std::forward<C>(axes);
409 template <
class R,
class E,
class C>
410 inline auto forward_normalize(E& expr, C&& axes)
411 -> std::enable_if_t<xtl::is_signed<std::decay_t<
decltype(*std::begin(axes))>>::value, R>
414 xt::resize_container(res, std::size(axes));
415 auto dim = expr.dimension();
422 return normalize_axis(dim, ax_el);
426 XTENSOR_ASSERT(std::all_of(
431 return ax_el < expr.dimension();
438 template <
class R,
class E,
class C>
439 inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
440 !xtl::is_signed<std::decay_t<
decltype(*std::begin(axes))>>::value && !std::is_same<R, std::decay_t<C>>::value,
443 static_cast<void>(expr);
446 xt::resize_container(res, std::size(axes));
447 std::copy(std::begin(axes), std::end(axes), std::begin(res));
448 XTENSOR_ASSERT(std::all_of(
453 return ax_el < expr.dimension();
459 template <
class R,
class E,
class C>
460 inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
461 !xtl::is_signed<std::decay_t<
decltype(*std::begin(axes))>>::value && std::is_same<R, std::decay_t<C>>::value,
464 static_cast<void>(expr);
465 XTENSOR_ASSERT(std::all_of(
470 return ax_el < expr.dimension();
473 return std::move(axes);
480 template <
class T,
class =
void_t<>>
489 using type =
typename T::value_type;
493 using get_value_type_t =
typename get_value_type<T>::type;
501 template <std::size_t I,
template <
typename... Args>
class T, typename... Args>
502 decltype(auto) get(T<Args...>&& v)
504 return std::get<I>(
static_cast<std::tuple<Args...
>&&>(v));
507 template <std::size_t I,
template <
typename... Args>
class T, typename... Args>
508 decltype(auto) get(T<Args...>& v)
510 return std::get<I>(
static_cast<std::tuple<Args...
>&>(v));
513 template <std::size_t I,
template <
typename... Args>
class T, typename... Args>
514 decltype(auto) get(const T<Args...>& v)
516 return std::get<I>(
static_cast<const std::tuple<Args...
>&>(v));
525 template <
class T, std::size_t N, std::size_t... I>
526 constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>)
532 template <
class T, std::
size_t N>
533 constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N])
535 return detail::to_array_impl(a, std::make_index_sequence<N>{});
542 template <
class T,
class =
void>
553 std::is_same<typename std::remove_cv<typename xcontainer_inner_types<T>::storage_type>::type, invalid_type>>
561 template <
class E,
class =
void>
574 template <
class E,
class =
void>
584 template <
class E,
class =
void>
601 template <
class E,
class =
void>
610 decltype(*std::declval<const E>(), std::declval<const E>() == std::declval<const E>(), std::declval<const E>() != std::declval<const E>(), ++(*std::declval<E*>()), (*std::declval<E*>())++, std::true_type())>>
615 template <
typename E>
622#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7
624#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
626#if defined(_GLIBCXX_USE_CXX11_ABI)
627#if _GLIBCXX_USE_CXX11_ABI || (defined(_GLIBCXX_USE_DUAL_ABI) && !_GLIBCXX_USE_DUAL_ABI)
628#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
633#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(XTENSOR_GLIBCXX_USE_CXX11_ABI)
636 using xtrivially_default_constructible = std::is_trivially_default_constructible<T>;
641 using xtrivially_default_constructible = std::has_trivial_default_constructor<T>;
644#undef XTENSOR_GLIBCXX_USE_CXX11_ABI
650 template <
bool condition,
class T>
662 inline auto operator()(U&& u)
const
664 return static_cast<T
>(std::forward<U>(u));
676 template <
bool condition,
class T,
class U>
686 namespace alloc_tracking
688 inline bool& enabled()
699 inline void disable()
711 template <
class T,
class A, alloc_tracking::policy P>
712 struct tracking_allocator :
private A
715 using value_type =
typename A::value_type;
716 using reference = value_type&;
717 using const_reference =
const value_type&;
718 using pointer =
typename std::allocator_traits<A>::pointer;
719 using const_pointer =
typename std::allocator_traits<A>::const_pointer;
720 using size_type =
typename std::allocator_traits<A>::size_type;
721 using difference_type =
typename std::allocator_traits<A>::difference_type;
723 tracking_allocator() =
default;
725 T* allocate(std::size_t n)
727 if (alloc_tracking::enabled())
729 if (P == alloc_tracking::print)
731 std::cout <<
"xtensor allocating: " << n <<
"" << std::endl;
733 else if (P == alloc_tracking::assert)
737 "xtensor allocation of " + std::to_string(n) +
" elements detected"
741 return base_type::allocate(n);
744 using base_type::deallocate;
747#if ((defined(__cplusplus) && __cplusplus < 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG < 202002L))
748 using base_type::construct;
749 using base_type::destroy;
755 using traits = std::allocator_traits<A>;
756 using other = tracking_allocator<U, typename traits::template rebind_alloc<U>, P>;
760 template <
class T,
class AT, alloc_tracking::policy PT,
class U,
class AU, alloc_tracking::policy PU>
763 return std::is_same<AT, AU>::value;
766 template <
class T,
class AT, alloc_tracking::policy PT,
class U,
class AU, alloc_tracking::policy PU>
767 inline bool operator!=(
const tracking_allocator<T, AT, PT>& a,
const tracking_allocator<U, AU, PU>& b)
776 template <
class E1,
class E2,
class =
void>
781 template <
class E1,
class E2>
782 struct has_assign_to<E1, E2, void_t<decltype(std::declval<const E2&>().assign_to(std::declval<E1&>()))>>
787 template <
class E1,
class E2>
794 template <
class T,
class Enable =
void>
800 struct has_memory_address<T, void_t<decltype(std::addressof(*std::declval<T>().begin()))>> : std::true_type
804 template <
typename T>
806 template <
typename T>
813 const uintptr_t m_first = 0;
814 const uintptr_t m_last = 0;
816 explicit memory_range() =
default;
819 explicit memory_range(T* first, T* last)
820 : m_first(
reinterpret_cast<uintptr_t
>(last < first ? last : first))
821 , m_last(
reinterpret_cast<uintptr_t
>(last < first ? first : last))
826 bool overlaps(T* first, T* last)
const
830 return reinterpret_cast<uintptr_t
>(first) <= m_last
831 &&
reinterpret_cast<uintptr_t
>(last) >= m_first;
835 return reinterpret_cast<uintptr_t
>(last) <= m_last
836 &&
reinterpret_cast<uintptr_t
>(first) >= m_first;
841 template <
class E,
class Enable =
void>
844 static bool check_overlap(
const E&,
const memory_range&)
853 static bool check_overlap(
const E& expr,
const memory_range& dst_range)
855 if (expr.size() == 0)
861 return dst_range.overlaps(std::addressof(*expr.begin()), std::addressof(*expr.rbegin()));
866 struct overlapping_memory_checker_base
870 explicit overlapping_memory_checker_base() =
default;
872 explicit overlapping_memory_checker_base(
memory_range dst_memory_range)
873 : m_dst_range(std::move(dst_memory_range))
878 bool check_overlap(
const E& expr)
const
880 if (!m_dst_range.m_first || !m_dst_range.m_last)
886 return overlapping_memory_checker_traits<E>::check_overlap(expr, m_dst_range);
891 template <
class Dst,
class Enable =
void>
892 struct overlapping_memory_checker : overlapping_memory_checker_base
894 explicit overlapping_memory_checker(
const Dst&)
895 : overlapping_memory_checker_base()
901 struct overlapping_memory_checker<Dst, std::enable_if_t<has_memory_address<Dst>::value>>
902 : overlapping_memory_checker_base
904 explicit overlapping_memory_checker(
const Dst& aDst)
905 : overlapping_memory_checker_base(
908 if (aDst.size() == 0)
914 return memory_range(std::addressof(*aDst.begin()), std::addressof(*aDst.rbegin()));
923 auto make_overlapping_memory_checker(
const Dst& a_dst)
932 template <
class X,
template <
class,
class>
class C,
class T,
class A>
935 using traits = std::allocator_traits<A>;
936 using allocator =
typename traits::template rebind_alloc<X>;
937 using type = C<X, allocator>;
941#ifdef __cpp_template_template_args
942 template <
class X,
class T, std::
size_t N>
945 using type = std::array<X, N>;
948 template <
class X,
template <
class, std::
size_t>
class C,
class T, std::size_t N>
951 using type = C<X, N>;
965 template <std::size_t... I>
971 using type = std::array<std::ptrdiff_t,
sizeof...(I)>;
974 template <
class CP,
class O,
class A>
977 template <
class CP,
class O,
class A>
983 using type = std::vector<
984 typename xbuffer_adaptor<CP, O, A>::value_type,
985 typename xbuffer_adaptor<CP, O, A>::allocator_type>;
990 using get_strides_t =
typename get_strides_type<C>::type;
999 using storage_type = std::decay_t<ST>;
1000 using type = std::conditional_t<
1001 std::is_const<std::remove_reference_t<ST>>::value,
1002 typename storage_type::const_reference,
1003 typename storage_type::reference>;
1007 using inner_reference_t =
typename inner_reference<ST>::type;
1013 template <
class E,
typename =
void>
1016 static constexpr std::size_t value = SIZE_MAX;
1022 static constexpr std::size_t value = E::rank;
1032 using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value != SIZE_MAX>;
1042 template <
class E,
size_t N>
1045 using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value == N>;
1048 template <
class E,
size_t N>
Fixed shape implementation for compile time defined arrays.
auto arg(E &&e) noexcept
Calculates the phase angle (in radians) elementwise for the complex numbers in e.
auto strides(const E &e, stride_type type=stride_type::normal) noexcept
Get strides of an object.
standard mathematical functions for xexpressions
auto accumulate(F &&f, E &&e, EVS evaluation_strategy=EVS())
Accumulate and flatten array NOTE This function is not lazy!
auto conditional_cast(U &&u)
Perform a type cast when a condition is true.