#ifndef XTENSOR_UTILS_HPP
#define XTENSOR_UTILS_HPP

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#include <xtl/xfunctional.hpp>
#include <xtl/xmeta_utils.hpp>
#include <xtl/xsequence.hpp>
#include <xtl/xtype_traits.hpp>

#include "xtensor_config.hpp"

#define NOEXCEPT(T) noexcept(T)

namespace xt
{
    // Forward declarations of the utilities defined below.

    template <std::size_t I, class... Args>
    constexpr decltype(auto) argument(Args&&... args) noexcept;

    template <class R, class F, class... S>
    R apply(std::size_t index, F&& func, const std::tuple<S...>& s) NOEXCEPT(noexcept(func(std::get<0>(s))));

    template <class T, class S>
    void nested_copy(T&& iter, const S& s);

    template <class T, class S>
    void nested_copy(T&& iter, std::initializer_list<S> s);

    template <class C>
    bool resize_container(C& c, typename C::size_type size);

    template <class T, std::size_t N>
    bool resize_container(std::array<T, N>& a, typename std::array<T, N>::size_type size);

    template <std::size_t... I>
    class fixed_shape;

    template <std::size_t... I>
    bool resize_container(fixed_shape<I...>& a, std::size_t size);

    template <class X, class C>
    struct rebind_container;

    template <class X, class C>
    using rebind_container_t = typename rebind_container<X, C>::type;

    std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis);
    // Workaround: std::void_t is not available in C++14.
    template <class... T>
    struct make_void
    {
        using type = void;
    };

    template <class... T>
    using void_t = typename make_void<T...>::type;

    // Marker used where no real type is available (e.g. the storage type of
    // generator expressions).
    struct invalid_type
    {
    };

    template <class... T>
    struct common_value_type
    {
        using type = std::common_type_t<typename T::value_type...>;
    };

    template <class... T>
    using common_value_type_t = typename common_value_type<T...>::type;

    template <class T, class R>
    using disable_integral_t = std::enable_if_t<!xtl::is_integral<T>::value, R>;
    // is_specialization_of<TT, T>: true when T is an instance of the class template TT.
    template <template <class...> class TT, class T>
    struct is_specialization_of : std::false_type
    {
    };

    template <template <class...> class TT, class... Ts>
    struct is_specialization_of<TT, TT<Ts...>> : std::true_type
    {
    };

    // remove_class<F>: strips the class from a pointer-to-member-function type,
    // yielding the plain function type.
    template <class T>
    struct remove_class;

    template <class C, class R, class... Args>
    struct remove_class<R (C::*)(Args...)>
    {
        typedef R type(Args...);
    };

    template <class C, class R, class... Args>
    struct remove_class<R (C::*)(Args...) const>
    {
        typedef R type(Args...);
    };
    namespace detail
    {
        // Terminal case: the index has reached the end of the tuple.
        template <std::size_t I, class F, class... T>
        inline typename std::enable_if<I == sizeof...(T), void>::type
        for_each_impl(F&& /*f*/, std::tuple<T...>& /*t*/) noexcept
        {
        }

        // Recursive case: apply the functor to the I-th element, then recurse.
        template <std::size_t I, class F, class... T>
        inline typename std::enable_if<(I < sizeof...(T)), void>::type
        for_each_impl(F&& f, std::tuple<T...>& t) noexcept(noexcept(f(std::get<I>(t))))
        {
            f(std::get<I>(t));
            for_each_impl<I + 1, F, T...>(std::forward<F>(f), t);
        }
    }

    // Applies the functor f to every element of the tuple t.
    template <class F, class... T>
    inline void for_each(F&& f, std::tuple<T...>& t)
        noexcept(noexcept(detail::for_each_impl<0, F, T...>(std::forward<F>(f), t)))
    {
        detail::for_each_impl<0, F, T...>(std::forward<F>(f), t);
    }
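
    // Illustrative usage (not part of the original header):
    //   std::tuple<int, double> t{1, 2.5};
    //   xt::for_each([](auto& v) { v += 1; }, t);   // t becomes {2, 3.5}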
    namespace detail
    {
        // const-tuple variants of for_each_impl.
        template <std::size_t I, class F, class... T>
        inline typename std::enable_if<I == sizeof...(T), void>::type
        for_each_impl(F&& /*f*/, const std::tuple<T...>& /*t*/) noexcept
        {
        }

        template <std::size_t I, class F, class... T>
        inline typename std::enable_if<(I < sizeof...(T)), void>::type
        for_each_impl(F&& f, const std::tuple<T...>& t) noexcept(noexcept(f(std::get<I>(t))))
        {
            f(std::get<I>(t));
            for_each_impl<I + 1, F, T...>(std::forward<F>(f), t);
        }
    }

    // const overload of for_each.
    template <class F, class... T>
    inline void for_each(F&& f, const std::tuple<T...>& t)
        noexcept(noexcept(detail::for_each_impl<0, F, T...>(std::forward<F>(f), t)))
    {
        detail::for_each_impl<0, F, T...>(std::forward<F>(f), t);
    }
    namespace detail
    {
        // Terminal case: every element has been folded; return the accumulated value.
        template <std::size_t I, class F, class R, class... T>
        inline std::enable_if_t<I == sizeof...(T), R>
        accumulate_impl(F&& /*f*/, R init, const std::tuple<T...>& /*t*/) noexcept
        {
            return init;
        }

        // Recursive case: fold the I-th element into the accumulator.
        template <std::size_t I, class F, class R, class... T>
        inline std::enable_if_t<(I < sizeof...(T)), R>
        accumulate_impl(F&& f, R init, const std::tuple<T...>& t) noexcept(noexcept(f(init, std::get<I>(t))))
        {
            R res = f(init, std::get<I>(t));
            return accumulate_impl<I + 1, F, R, T...>(std::forward<F>(f), res, t);
        }
    }

    // Left fold of the binary functor f over the elements of the tuple t, starting from init.
    template <class F, class R, class... T>
    inline R accumulate(F&& f, R init, const std::tuple<T...>& t)
        noexcept(noexcept(detail::accumulate_impl<0, F, R, T...>(std::forward<F>(f), init, t)))
    {
        return detail::accumulate_impl<0, F, R, T...>(std::forward<F>(f), init, t);
    }
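
    // Illustrative usage (not part of the original header):
    //   std::tuple<int, double> t{1, 2.5};
    //   double s = xt::accumulate([](double acc, auto v) { return acc + v; }, 0.0, t);   // 3.5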
    namespace detail
    {
        // getter<I> discards arguments until the I-th one is reached.
        template <std::size_t I>
        struct getter
        {
            template <class Arg, class... Args>
            static constexpr decltype(auto) get(Arg&& /*arg*/, Args&&... args) noexcept
            {
                return getter<I - 1>::get(std::forward<Args>(args)...);
            }
        };

        template <>
        struct getter<0>
        {
            template <class Arg, class... Args>
            static constexpr Arg&& get(Arg&& arg, Args&&... /*args*/) noexcept
            {
                return std::forward<Arg>(arg);
            }
        };
    }

    // Returns the I-th element of a parameter pack.
    template <std::size_t I, class... Args>
    constexpr decltype(auto) argument(Args&&... args) noexcept
    {
        static_assert(I < sizeof...(Args), "I should be less than sizeof...(Args)");
        return detail::getter<I>::get(std::forward<Args>(args)...);
    }
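
    // Illustrative usage (not part of the original header):
    //   xt::argument<1>(a, b, c) yields b, perfectly forwarded.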
    namespace detail
    {
        template <class R, class F, std::size_t I, class... S>
        R apply_one(F&& func, const std::tuple<S...>& s) NOEXCEPT(noexcept(func(std::get<I>(s))))
        {
            return static_cast<R>(func(std::get<I>(s)));
        }

        template <class R, class F, std::size_t... I, class... S>
        R apply(std::size_t index, F&& func, std::index_sequence<I...>, const std::tuple<S...>& s)
            NOEXCEPT(noexcept(func(std::get<0>(s))))
        {
            // Dispatch table with one entry per tuple element; the runtime index
            // selects which instantiation of apply_one to call.
            using FT = std::add_pointer_t<R(F&&, const std::tuple<S...>&)>;
            static const std::array<FT, sizeof...(I)> ar = {{&apply_one<R, F, I, S...>...}};
            return ar[index](std::forward<F>(func), s);
        }
    }

    // Applies func to the element of s selected by the runtime index and converts the result to R.
    template <class R, class F, class... S>
    inline R apply(std::size_t index, F&& func, const std::tuple<S...>& s)
        NOEXCEPT(noexcept(func(std::get<0>(s))))
    {
        return detail::apply<R>(index, std::forward<F>(func), std::make_index_sequence<sizeof...(S)>(), s);
    }
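
    // Illustrative usage (not part of the original header):
    //   std::tuple<int, double> t{3, 4.5};
    //   double d = xt::apply<double>(1, [](auto v) { return v * 2; }, t);   // 9.0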
    // nested_initializer_list<T, I>::type is an initializer list of T nested I levels deep.
    template <class T, std::size_t I>
    struct nested_initializer_list
    {
        using type = std::initializer_list<typename nested_initializer_list<T, I - 1>::type>;
    };

    template <class T>
    struct nested_initializer_list<T, 0>
    {
        using type = T;
    };

    template <class T, std::size_t I>
    using nested_initializer_list_t = typename nested_initializer_list<T, I>::type;

    // Copies a scalar through the output iterator and advances it.
    template <class T, class S>
    inline void nested_copy(T&& iter, const S& s)
    {
        *iter++ = s;
    }

    // Recursively copies a nested initializer list through the output iterator.
    template <class T, class S>
    inline void nested_copy(T&& iter, std::initializer_list<S> s)
    {
        for (auto it = s.begin(); it != s.end(); ++it)
        {
            nested_copy(std::forward<T>(iter), *it);
        }
    }
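
    // Illustrative usage (not part of the original header):
    //   nested_initializer_list_t<int, 2> is std::initializer_list<std::initializer_list<int>>.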
    // Resizes a dynamically sized container; always succeeds.
    template <class C>
    inline bool resize_container(C& c, typename C::size_type size)
    {
        c.resize(size);
        return true;
    }

    // A std::array cannot be resized: report whether the requested size already matches.
    template <class T, std::size_t N>
    inline bool resize_container(std::array<T, N>& /*a*/, typename std::array<T, N>::size_type size)
    {
        return size == N;
    }

    // Same for a compile-time fixed_shape.
    template <std::size_t... I>
    inline bool resize_container(fixed_shape<I...>&, std::size_t size)
    {
        return sizeof...(I) == size;
    }

    // Maps a possibly negative axis onto the range [0, dim).
    inline std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis)
    {
        return axis < 0 ? static_cast<std::size_t>(static_cast<std::ptrdiff_t>(dim) + axis)
                        : static_cast<std::size_t>(axis);
    }
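
    // Illustrative usage (not part of the original header):
    //   normalize_axis(3, -1) == 2 and normalize_axis(3, 1) == 1.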
    // Normalizes every (possibly negative) axis of a signed container, returning an
    // unsigned container of the same kind.
    template <class E, class C>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && xtl::is_signed<typename std::decay_t<C>::value_type>::value,
        rebind_container_t<std::size_t, std::decay_t<C>>>
    normalize_axis(E& expr, C&& axes)
    {
        rebind_container_t<std::size_t, std::decay_t<C>> res;
        resize_container(res, axes.size());

        for (std::size_t i = 0; i < axes.size(); ++i)
        {
            res[i] = normalize_axis(expr.dimension(), axes[i]);
        }

        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }
    // Unsigned axes are already normalized; validate and forward them unchanged.
    template <class C, class E>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && std::is_unsigned<typename std::decay_t<C>::value_type>::value,
        C&&>
    normalize_axis(E& expr, C&& axes)
    {
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            std::begin(axes),
            std::end(axes),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::forward<C>(axes);
    }
    // Normalizes a sequence of signed axes into a freshly constructed container of type R.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes)
        -> std::enable_if_t<xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value, R>
    {
        R res;
        xt::resize_container(res, xtl::sequence_size(axes));
        auto dim = expr.dimension();
        std::transform(
            std::begin(axes),
            std::end(axes),
            std::begin(res),
            [&dim](auto ax_el)
            {
                return normalize_axis(dim, ax_el);
            }
        );

        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }
    // Unsigned axes copied into a container of a different type R.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && !std::is_same<R, std::decay_t<C>>::value,
        R>
    {
        static_cast<void>(expr);
        R res;
        xt::resize_container(res, xtl::sequence_size(axes));
        std::copy(std::begin(axes), std::end(axes), std::begin(res));
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return res;
    }
    // Unsigned axes already stored in a container of type R: validate and move them through.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && std::is_same<R, std::decay_t<C>>::value,
        R>
    {
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            std::begin(axes),
            std::end(axes),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::move(axes);
    }
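
    // Illustrative usage (not part of the original header), assuming an expression e
    // with e.dimension() == 3:
    //   std::vector<std::ptrdiff_t> ax{-1, 0};
    //   auto n = xt::normalize_axis(e, ax);   // std::vector<std::size_t>{2, 0}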
    // get_value_type<T> yields T::value_type when it exists, T itself otherwise.
    template <class T, class = void_t<>>
    struct get_value_type
    {
        using type = T;
    };

    template <class T>
    struct get_value_type<T, void_t<typename T::value_type>>
    {
        using type = typename T::value_type;
    };

    template <class T>
    using get_value_type_t = typename get_value_type<T>::type;
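
    // Illustrative usage (not part of the original header):
    //   get_value_type_t<std::vector<int>> is int; get_value_type_t<double> is double.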
    // Forward std::get through classes deriving from std::tuple; some compilers fail
    // to instantiate std::get directly on such classes.
    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(T<Args...>&& v)
    {
        return std::get<I>(static_cast<std::tuple<Args...>&&>(v));
    }

    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(T<Args...>& v)
    {
        return std::get<I>(static_cast<std::tuple<Args...>&>(v));
    }

    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(const T<Args...>& v)
    {
        return std::get<I>(static_cast<const std::tuple<Args...>&>(v));
    }
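
    // Illustrative usage (not part of the original header): for a type D publicly
    // derived from std::tuple<int, double>, xt::get<1>(d) returns the double element,
    // mirroring std::get<1>.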
    // apply_cv<T, U>: applies the const/volatile/reference qualifiers of T to U.
    namespace detail
    {
        template <
            class T,
            class U,
            bool = std::is_const<std::remove_reference_t<T>>::value,
            bool = std::is_volatile<std::remove_reference_t<T>>::value>
        struct apply_cv_impl
        {
            using type = U;
        };

        template <class T, class U>
        struct apply_cv_impl<T, U, true, false>
        {
            using type = const U;
        };

        template <class T, class U>
        struct apply_cv_impl<T, U, false, true>
        {
            using type = volatile U;
        };

        template <class T, class U>
        struct apply_cv_impl<T, U, true, true>
        {
            using type = const volatile U;
        };

        template <class T, class U>
        struct apply_cv_impl<T&, U, false, false>
        {
            using type = U&;
        };

        template <class T, class U>
        struct apply_cv_impl<T&, U, true, false>
        {
            using type = const U&;
        };

        template <class T, class U>
        struct apply_cv_impl<T&, U, false, true>
        {
            using type = volatile U&;
        };

        template <class T, class U>
        struct apply_cv_impl<T&, U, true, true>
        {
            using type = const volatile U&;
        };
    }

    template <class T, class U>
    struct apply_cv
    {
        using type = typename detail::apply_cv_impl<T, U>::type;
    };

    template <class T, class U>
    using apply_cv_t = typename apply_cv<T, U>::type;
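
    // Illustrative usage (not part of the original header):
    //   apply_cv_t<const int&, double> is const double&.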
    namespace detail
    {
        template <class T, std::size_t N, std::size_t... I>
        constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>)
        {
            return {{a[I]...}};
        }
    }

    // Converts a built-in array into a std::array.
    template <class T, std::size_t N>
    constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N])
    {
        return detail::to_array_impl(a, std::make_index_sequence<N>{});
    }

    // Size of a container, equivalent to std::size in C++17.
    template <class C>
    constexpr auto sequence_size(const C& c) -> decltype(c.size())
    {
        return c.size();
    }

    // Size of a built-in array.
    template <class T, std::size_t N>
    constexpr std::size_t sequence_size(const T (&)[N])
    {
        return N;
    }
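
    // Illustrative usage (not part of the original header):
    //   int raw[3] = {1, 2, 3};
    //   auto arr = xt::to_array(raw);        // std::array<int, 3>{1, 2, 3}
    //   xt::sequence_size(raw);              // 3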
    // Declared here so the trait below can reference it; defined in another xtensor header.
    template <class D>
    struct xcontainer_inner_types;

    // has_storage_type<T>: detects whether xcontainer_inner_types<T> defines a real
    // storage type (i.e. one different from invalid_type).
    template <class T, class = void>
    struct has_storage_type : std::false_type
    {
    };

    template <class T>
    struct has_storage_type<T, void_t<typename xcontainer_inner_types<T>::storage_type>>
        : xtl::negation<
              std::is_same<typename std::remove_cv<typename xcontainer_inner_types<T>::storage_type>::type, invalid_type>>
    {
    };

    // has_data_interface<E>: detects whether E provides a data() method.
    template <class E, class = void>
    struct has_data_interface : std::false_type
    {
    };

    template <class E>
    struct has_data_interface<E, void_t<decltype(std::declval<E>().data())>> : std::true_type
    {
    };

    // has_strides<E>: detects whether E provides a strides() method.
    template <class E, class = void>
    struct has_strides : std::false_type
    {
    };

    template <class E>
    struct has_strides<E, void_t<decltype(std::declval<E>().strides())>> : std::true_type
    {
    };

    // has_iterator_interface<E>: detects whether E provides a begin() method.
    template <class E, class = void>
    struct has_iterator_interface : std::false_type
    {
    };

    template <class E>
    struct has_iterator_interface<E, void_t<decltype(std::declval<E>().begin())>> : std::true_type
    {
    };

    // is_iterator<E>: detects the basic iterator operations on E.
    template <class E, class = void>
    struct is_iterator : std::false_type
    {
    };

    template <class E>
    struct is_iterator<
        E,
        void_t<decltype(
            *std::declval<const E>(),
            std::declval<const E>() == std::declval<const E>(),
            std::declval<const E>() != std::declval<const E>(),
            ++(*std::declval<E*>()),
            (*std::declval<E*>())++,
            std::true_type())>> : std::true_type
    {
    };
#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7
// libstdc++ 7 and later no longer provide std::has_trivial_default_constructor.
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#else
#if defined(_GLIBCXX_USE_CXX11_ABI)
#if _GLIBCXX_USE_CXX11_ABI || (defined(_GLIBCXX_USE_DUAL_ABI) && !_GLIBCXX_USE_DUAL_ABI)
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#endif
#endif
#endif

#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(XTENSOR_GLIBCXX_USE_CXX11_ABI)

    template <class T>
    using xtrivially_default_constructible = std::is_trivially_default_constructible<T>;

#else

    template <class T>
    using xtrivially_default_constructible = std::has_trivial_default_constructor<T>;

#endif
#undef XTENSOR_GLIBCXX_USE_CXX11_ABI
    namespace detail
    {
        // Functor that casts to T only when the compile-time condition is true;
        // otherwise it behaves as the identity.
        template <bool condition, class T>
        struct conditional_cast_functor;

        template <class T>
        struct conditional_cast_functor<false, T> : public xtl::identity
        {
        };

        template <class T>
        struct conditional_cast_functor<true, T>
        {
            template <class U>
            inline auto operator()(U&& u) const
            {
                return static_cast<T>(std::forward<U>(u));
            }
        };
    }

    // Performs a type cast to T when the condition is true; otherwise returns the
    // argument unchanged.
    template <bool condition, class T, class U>
    inline auto conditional_cast(U&& u)
    {
        return detail::conditional_cast_functor<condition, T>()(std::forward<U>(u));
    }
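
    // Illustrative usage (not part of the original header):
    //   xt::conditional_cast<true, double>(3) yields 3.0, while
    //   xt::conditional_cast<false, double>(3) returns the int unchanged.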
    namespace alloc_tracking
    {
        inline bool& enabled()
        {
            static bool enabled;
            return enabled;
        }

        inline void enable()
        {
            enabled() = true;
        }

        inline void disable()
        {
            enabled() = false;
        }

        enum policy
        {
            print,
            assert
        };
    }

    // Allocator wrapper that reports (or rejects) allocations when tracking is enabled.
    template <class T, class A, alloc_tracking::policy P>
    struct tracking_allocator : private A
    {
        using base_type = A;
        using value_type = typename A::value_type;
        using reference = value_type&;
        using const_reference = const value_type&;
        using pointer = typename std::allocator_traits<A>::pointer;
        using const_pointer = typename std::allocator_traits<A>::const_pointer;
        using size_type = typename std::allocator_traits<A>::size_type;
        using difference_type = typename std::allocator_traits<A>::difference_type;

        T* allocate(std::size_t n)
        {
            if (alloc_tracking::enabled())
            {
                if (P == alloc_tracking::print)
                {
                    std::cout << "xtensor allocating: " << n << std::endl;
                }
                else if (P == alloc_tracking::assert)
                {
                    XTENSOR_THROW(
                        std::runtime_error,
                        "xtensor allocation of " + std::to_string(n) + " elements detected"
                    );
                }
            }
            return base_type::allocate(n);
        }

        using base_type::deallocate;

// construct and destroy are removed from allocators in C++20
#if ((defined(__cplusplus) && __cplusplus < 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG < 202002L))
        using base_type::construct;
        using base_type::destroy;
#endif

        template <class U>
        struct rebind
        {
            using traits = std::allocator_traits<A>;
            using other = tracking_allocator<U, typename traits::template rebind_alloc<U>, P>;
        };
    };
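
    // Illustrative usage (not part of the original header):
    //   using talloc = xt::tracking_allocator<double, std::allocator<double>, xt::alloc_tracking::print>;
    //   xt::alloc_tracking::enable();
    //   std::vector<double, talloc> v(100);   // prints "xtensor allocating: 100"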
    // Two tracking allocators compare equal when their underlying allocator types do.
    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator==(const tracking_allocator<T, AT, PT>&, const tracking_allocator<U, AU, PU>&)
    {
        return std::is_same<AT, AU>::value;
    }

    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator!=(const tracking_allocator<T, AT, PT>& a, const tracking_allocator<U, AU, PU>& b)
    {
        return !(a == b);
    }
    // has_assign_to<E1, E2>: detects whether E2 provides assign_to(E1&).
    template <class E1, class E2, class = void>
    struct has_assign_to : std::false_type
    {
    };

    template <class E1, class E2>
    struct has_assign_to<E1, E2, void_t<decltype(std::declval<const E2&>().assign_to(std::declval<E1&>()))>>
        : std::true_type
    {
    };

    // has_memory_address<T>: detects whether the address of *begin() can be taken on T.
    template <class T, class Enable = void>
    struct has_memory_address : std::false_type
    {
    };

    template <class T>
    struct has_memory_address<T, void_t<decltype(std::addressof(*std::declval<T>().begin()))>> : std::true_type
    {
    };
    // Closed byte range [m_first, m_last] of a destination buffer. Overlap checks are
    // done on integer values rather than raw pointers.
    struct memory_range
    {
        const uintptr_t m_first = 0;
        const uintptr_t m_last = 0;

        explicit memory_range() = default;

        template <class T>
        explicit memory_range(T* first, T* last)
            : m_first(reinterpret_cast<uintptr_t>(last < first ? last : first))
            , m_last(reinterpret_cast<uintptr_t>(last < first ? first : last))
        {
        }

        template <class T>
        bool overlaps(T* first, T* last) const
        {
            if (first <= last)
            {
                return reinterpret_cast<uintptr_t>(first) <= m_last && reinterpret_cast<uintptr_t>(last) >= m_first;
            }
            else
            {
                return reinterpret_cast<uintptr_t>(last) <= m_last && reinterpret_cast<uintptr_t>(first) >= m_first;
            }
        }
    };

    // By default an expression is conservatively assumed to overlap the destination.
    template <class E, class Enable = void>
    struct overlapping_memory_checker_traits
    {
        static bool check_overlap(const E&, const memory_range&)
        {
            return true;
        }
    };

    // When the expression exposes contiguous memory, compare its address range with
    // the destination range.
    template <class E>
    struct overlapping_memory_checker_traits<E, std::enable_if_t<has_memory_address<E>::value>>
    {
        static bool check_overlap(const E& expr, const memory_range& dst_range)
        {
            if (expr.size() == 0)
            {
                return false;
            }
            else
            {
                return dst_range.overlaps(std::addressof(*expr.begin()), std::addressof(*expr.rbegin()));
            }
        }
    };

    struct overlapping_memory_checker_base
    {
        memory_range m_dst_range;

        explicit overlapping_memory_checker_base() = default;

        explicit overlapping_memory_checker_base(memory_range dst_memory_range)
            : m_dst_range(std::move(dst_memory_range))
        {
        }

        template <class E>
        bool check_overlap(const E& expr) const
        {
            if (!m_dst_range.m_first || !m_dst_range.m_last)
            {
                return false;
            }
            else
            {
                return overlapping_memory_checker_traits<E>::check_overlap(expr, m_dst_range);
            }
        }
    };
    // Destination without an accessible memory range: nothing can be checked.
    template <class Dst, class Enable = void>
    struct overlapping_memory_checker : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst&)
            : overlapping_memory_checker_base()
        {
        }
    };

    template <class Dst>
    struct overlapping_memory_checker<Dst, std::enable_if_t<has_memory_address<Dst>::value>>
        : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst& aDst)
            : overlapping_memory_checker_base(
                  [&]()
                  {
                      if (aDst.size() == 0)
                      {
                          return memory_range();
                      }
                      else
                      {
                          return memory_range(std::addressof(*aDst.begin()), std::addressof(*aDst.rbegin()));
                      }
                  }()
              )
        {
        }
    };

    template <class Dst>
    auto make_overlapping_memory_checker(const Dst& a_dst)
    {
        return overlapping_memory_checker<Dst>(a_dst);
    }
    // rebind_container: rebinds a container to a different value type X.
    template <class X, template <class, class> class C, class T, class A>
    struct rebind_container<X, C<T, A>>
    {
        using traits = std::allocator_traits<A>;
        using allocator = typename traits::template rebind_alloc<X>;
        using type = C<X, allocator>;
    };

// Workaround for rebind_container issues with std::array when C++17 mode is enabled on recent GCC.
#if defined(__GNUC__) && __GNUC__ > 6 && !defined(__clang__) && __cplusplus >= 201703L
    template <class X, class T, std::size_t N>
    struct rebind_container<X, std::array<T, N>>
    {
        using type = std::array<X, N>;
    };
#else
    template <class X, template <class, std::size_t> class C, class T, std::size_t N>
    struct rebind_container<X, C<T, N>>
    {
        using type = C<X, N>;
    };
#endif
    // get_strides_type: container type used to store the strides matching a given shape type.
    template <class S>
    struct get_strides_type
    {
        using type = typename rebind_container<std::ptrdiff_t, S>::type;
    };

    template <std::size_t... I>
    struct get_strides_type<fixed_shape<I...>>
    {
        // The strides of a fixed_shape could in principle be computed at compile time;
        // a std::array keeps the stride computation uniform with the other shape types.
        using type = std::array<std::ptrdiff_t, sizeof...(I)>;
    };

    template <class CP, class O, class A>
    class xbuffer_adaptor;

    template <class CP, class O, class A>
    struct get_strides_type<xbuffer_adaptor<CP, O, A>>
    {
        // Used by reshape_view in the language bindings, where the new shape is passed
        // as an xbuffer_adaptor.
        using type = std::vector<
            typename xbuffer_adaptor<CP, O, A>::value_type,
            typename xbuffer_adaptor<CP, O, A>::allocator_type>;
    };

    template <class C>
    using get_strides_t = typename get_strides_type<C>::type;
    // inner_reference<ST>: reference type matching the constness of the storage type ST.
    template <class ST>
    struct inner_reference
    {
        using storage_type = std::decay_t<ST>;
        using type = std::conditional_t<
            std::is_const<std::remove_reference_t<ST>>::value,
            typename storage_type::const_reference,
            typename storage_type::reference>;
    };

    template <class ST>
    using inner_reference_t = typename inner_reference<ST>::type;
    // get_rank<E>::value is E::rank when E exposes a static rank, SIZE_MAX otherwise.
    template <class E, typename = void>
    struct get_rank
    {
        static constexpr std::size_t value = SIZE_MAX;
    };

    template <class E>
    struct get_rank<E, decltype((void) E::rank, void())>
    {
        static constexpr std::size_t value = E::rank;
    };

    // has_fixed_rank<E>: true when the rank of E is known at compile time.
    template <class E>
    struct has_fixed_rank
    {
        using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value != SIZE_MAX>;
    };

    template <class E>
    using has_fixed_rank_t = typename has_fixed_rank<std::decay_t<E>>::type;

    // has_rank<E, N>: true when the compile-time rank of E is exactly N.
    template <class E, size_t N>
    struct has_rank
    {
        using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value == N>;
    };

    template <class E, size_t N>
    using has_rank_t = typename has_rank<std::decay_t<E>, N>::type;
}

#endif