#ifndef XTENSOR_UTILS_HPP
#define XTENSOR_UTILS_HPP

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#include <xtl/xfunctional.hpp>
#include <xtl/xmeta_utils.hpp>
#include <xtl/xsequence.hpp>
#include <xtl/xtype_traits.hpp>

#include "../core/xtensor_config.hpp"

// Expression-dependent noexcept specifications are problematic on MSVC >= 2017,
// so NOEXCEPT(T) expands to nothing there.
#if (defined(_MSC_VER) && _MSC_VER >= 1910)
#define NOEXCEPT(T)
#else
#define NOEXCEPT(T) noexcept(T)
#endif

namespace xt
{
    /****************
     * declarations *
     ****************/

    template <std::size_t I, class... Args>
    constexpr decltype(auto) argument(Args&&... args) noexcept;

    template <class R, class F, class... S>
    R apply(std::size_t index, F&& func, const std::tuple<S...>& s) NOEXCEPT(noexcept(func(std::get<0>(s))));

    template <class T, class S>
    void nested_copy(T&& iter, const S& s);

    template <class T, class S>
    void nested_copy(T&& iter, std::initializer_list<S> s);

    template <class C>
    bool resize_container(C& c, typename C::size_type size);

    template <class T, std::size_t N>
    bool resize_container(std::array<T, N>& a, typename std::array<T, N>::size_type size);

    template <std::size_t... I>
    class fixed_shape;

    template <std::size_t... I>
    bool resize_container(fixed_shape<I...>& a, std::size_t size);

    template <class X, class C>
    struct rebind_container;

    template <class X, class C>
    using rebind_container_t = typename rebind_container<X, C>::type;

    std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis);

    template <class... T>
    struct make_void
    {
        using type = void;
    };

    template <class... T>
    using void_t = typename make_void<T...>::type;

    // Used for non-existent types (e.g. the storage of expressions
    // that have none, such as generators).
    struct invalid_type
    {
    };

    template <class T, class R>
    using disable_integral_t = std::enable_if_t<!xtl::is_integral<T>::value, R>;

    /***************************************
     * is_specialization_of implementation *
     ***************************************/

    template <template <class...> class TT, class T>
    struct is_specialization_of : std::false_type
    {
    };

    template <template <class...> class TT, class... Ts>
    struct is_specialization_of<TT, TT<Ts...>> : std::true_type
    {
    };

    /*******************************
     * remove_class implementation *
     *******************************/

    template <class T>
    struct remove_class
    {
    };

    template <class C, class R, class... Args>
    struct remove_class<R (C::*)(Args...)>
    {
        typedef R type(Args...);
    };

    template <class C, class R, class... Args>
    struct remove_class<R (C::*)(Args...) const>
    {
        typedef R type(Args...);
    };

    /***************************
     * for_each implementation *
     ***************************/

    namespace detail
    {
        template <class F, std::size_t... I, class... Ts>
        void for_each(F&& f, std::tuple<Ts...>& t, std::index_sequence<I...>)
            noexcept((noexcept(f(std::get<I>(t))) && ...))
        {
            (f(std::get<I>(t)), ...);
        }

        template <class F, std::size_t... I, class... Ts>
        void for_each(F&& f, const std::tuple<Ts...>& t, std::index_sequence<I...>)
            noexcept((noexcept(f(std::get<I>(t))) && ...))
        {
            (f(std::get<I>(t)), ...);
        }
    }

    template <class F, class... Ts>
    inline void for_each(F&& f, std::tuple<Ts...>& t)
        noexcept(noexcept(detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{})))
    {
        detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{});
    }

    template <class F, class... Ts>
    inline void for_each(F&& f, const std::tuple<Ts...>& t)
        noexcept(noexcept(detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{})))
    {
        detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{});
    }
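
    // Usage sketch (illustrative, not part of the original header): for_each
    // invokes a callable on every element of a tuple, in order.
    //
    //   std::tuple<int, double> t{1, 2.5};
    //   xt::for_each([](auto& v) { v += 1; }, t);  // t now holds {2, 3.5}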

    /*****************************
     * accumulate implementation *
     *****************************/

    namespace detail
    {
        template <class F, class R, class... T, std::size_t... I>
        R accumulate_impl(F&& f, R init, const std::tuple<T...>& t, std::index_sequence<I...>)
            noexcept((noexcept(f(init, std::get<I>(t))) && ...))
        {
            R res = init;
            auto wrapper = [&](const auto& i, const auto& j)
            {
                res = f(i, j);
            };
            (wrapper(res, std::get<I>(t)), ...);
            return res;
        }
    }

    template <class F, class R, class... T>
    inline R accumulate(F&& f, R init, const std::tuple<T...>& t)
        noexcept(noexcept(detail::accumulate_impl(std::forward<F>(f), init, t, std::make_index_sequence<sizeof...(T)>{})))
    {
        return detail::accumulate_impl(std::forward<F>(f), init, t, std::make_index_sequence<sizeof...(T)>{});
    }
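
    // Usage sketch (illustrative): this overload folds a binary callable over a
    // tuple, threading the running value from left to right.
    //
    //   std::tuple<int, int, int> t{1, 2, 3};
    //   int sum = xt::accumulate([](int acc, int v) { return acc + v; }, 0, t);  // sum == 6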

    /***************************
     * argument implementation *
     ***************************/

    namespace detail
    {
        template <std::size_t I>
        struct getter
        {
            template <class Arg, class... Args>
            static constexpr decltype(auto) get(Arg&&, Args&&... args) noexcept
            {
                return getter<I - 1>::get(std::forward<Args>(args)...);
            }
        };

        template <>
        struct getter<0>
        {
            template <class Arg, class... Args>
            static constexpr Arg&& get(Arg&& arg, Args&&...) noexcept
            {
                return std::forward<Arg>(arg);
            }
        };
    }

    template <std::size_t I, class... Args>
    constexpr decltype(auto) argument(Args&&... args) noexcept
    {
        static_assert(I < sizeof...(Args), "I should be lesser than sizeof...(Args)");
        return detail::getter<I>::get(std::forward<Args>(args)...);
    }
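
    // Usage sketch (illustrative): argument<I> perfectly forwards the I-th
    // element of a parameter pack; out-of-range indices fail at compile time.
    //
    //   int x = xt::argument<1>(10, 20, 30);  // x == 20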

    /************************
     * apply implementation *
     ************************/

    template <class R, class F, class... S>
    inline R apply(std::size_t index, F&& func, const std::tuple<S...>& s)
        NOEXCEPT(noexcept(func(std::get<0>(s))))
    {
        XTENSOR_ASSERT(sizeof...(S) > index);
        return std::apply(
            [&](const S&... args) -> R
            {
                auto f_impl = [&](auto&& self, auto&& i, auto&& h, auto&&... t) -> R
                {
                    if (i == index)
                    {
                        return static_cast<R>(func(h));
                    }
                    if constexpr (sizeof...(t) > 0)
                    {
                        return self(self, std::size_t{i + 1}, t...);
                    }
                    return R{};
                };
                return f_impl(f_impl, std::size_t{0}, args...);
            },
            s
        );
    }
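
    // Usage sketch (illustrative): apply selects a tuple element by a runtime
    // index and passes it to func; every alternative must be convertible to R.
    //
    //   std::tuple<int, short, long> t{1, 2, 3};
    //   auto as_int = [](auto v) { return static_cast<int>(v); };
    //   int v = xt::apply<int>(2, as_int, t);  // v == 3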

    /***************************
     * nested_initializer_list *
     ***************************/

    template <class T, std::size_t I>
    struct nested_initializer_list
    {
        using type = std::initializer_list<typename nested_initializer_list<T, I - 1>::type>;
    };

    template <class T>
    struct nested_initializer_list<T, 0>
    {
        using type = T;
    };

    template <class T, std::size_t I>
    using nested_initializer_list_t = typename nested_initializer_list<T, I>::type;

    /******************************
     * nested_copy implementation *
     ******************************/

    template <class T, class S>
    inline void nested_copy(T&& iter, const S& s)
    {
        *iter++ = s;
    }

    template <class T, class S>
    inline void nested_copy(T&& iter, std::initializer_list<S> s)
    {
        for (auto it = s.begin(); it != s.end(); ++it)
        {
            nested_copy(std::forward<T>(iter), *it);
        }
    }
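
    // Usage sketch (illustrative): nested_copy flattens a (possibly nested)
    // initializer list into an output iterator, recursing one level per list.
    //
    //   std::initializer_list<std::initializer_list<int>> s = {{1, 2}, {3, 4}};
    //   std::vector<int> buf(4);
    //   auto it = buf.begin();
    //   xt::nested_copy(it, s);  // buf == {1, 2, 3, 4}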

    /***********************************
     * resize_container implementation *
     ***********************************/

    template <class C>
    inline bool resize_container(C& c, typename C::size_type size)
    {
        c.resize(size);
        return true;
    }

    template <class T, std::size_t N>
    inline bool resize_container(std::array<T, N>&, typename std::array<T, N>::size_type size)
    {
        return size == N;
    }

    template <std::size_t... I>
    inline bool resize_container(xt::fixed_shape<I...>&, std::size_t size)
    {
        return sizeof...(I) == size;
    }
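
    // Usage sketch (illustrative): resize_container resizes dynamic containers
    // and merely validates the requested size for fixed-size ones.
    //
    //   std::vector<int> v;
    //   bool ok1 = xt::resize_container(v, 5);  // true, v.size() == 5
    //   std::array<int, 3> a;
    //   bool ok2 = xt::resize_container(a, 5);  // false, arrays cannot grow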

    /*********************************
     * normalize_axis implementation *
     *********************************/

    // scalar normalize axis
    inline std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis)
    {
        return axis < 0 ? static_cast<std::size_t>(static_cast<std::ptrdiff_t>(dim) + axis)
                        : static_cast<std::size_t>(axis);
    }
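
    // Usage sketch (illustrative): negative axes count from the end, mirroring
    // the NumPy convention.
    //
    //   xt::normalize_axis(3, -1);  // == 2
    //   xt::normalize_axis(3, 1);   // == 1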

    template <class E, class C>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && xtl::is_signed<typename std::decay_t<C>::value_type>::value,
        rebind_container_t<std::size_t, std::decay_t<C>>>
    normalize_axis(E& expr, C&& axes)
    {
        rebind_container_t<std::size_t, std::decay_t<C>> res;
        resize_container(res, axes.size());

        for (std::size_t i = 0; i < axes.size(); ++i)
        {
            res[i] = normalize_axis(expr.dimension(), axes[i]);
        }

        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }

    template <class C, class E>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && std::is_unsigned<typename std::decay_t<C>::value_type>::value,
        C&&>
    normalize_axis(E& expr, C&& axes)
    {
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            std::begin(axes),
            std::end(axes),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::forward<C>(axes);
    }

    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes)
        -> std::enable_if_t<xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value, R>
    {
        R res;
        xt::resize_container(res, std::size(axes));
        auto dim = expr.dimension();
        std::transform(
            std::begin(axes),
            std::end(axes),
            std::begin(res),
            [&dim](auto ax_el)
            {
                return normalize_axis(dim, ax_el);
            }
        );

        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }

    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && !std::is_same<R, std::decay_t<C>>::value,
        R>
    {
        static_cast<void>(expr);

        R res;
        xt::resize_container(res, std::size(axes));
        std::copy(std::begin(axes), std::end(axes), std::begin(res));
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return res;
    }

    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && std::is_same<R, std::decay_t<C>>::value,
        R>
    {
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            std::begin(axes),
            std::end(axes),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::move(axes);
    }
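
    // Usage sketch (illustrative, assuming expr is a 3-dimensional xtensor
    // expression): forward_normalize converts a sequence of possibly negative
    // axes into the container type R, copying or moving when no conversion is
    // needed.
    //
    //   std::vector<std::ptrdiff_t> axes{-1, 0};
    //   auto res = xt::forward_normalize<std::vector<std::size_t>>(expr, axes);  // {2, 0}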

    /******************
     * get_value_type *
     ******************/

    template <class T, class = void_t<>>
    struct get_value_type
    {
        using type = T;
    };

    template <class T>
    struct get_value_type<T, void_t<typename T::value_type>>
    {
        using type = typename T::value_type;
    };

    template <class T>
    using get_value_type_t = typename get_value_type<T>::type;

    /**********************
     * get implementation *
     **********************/

    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(T<Args...>&& v)
    {
        return std::get<I>(static_cast<std::tuple<Args...>&&>(v));
    }

    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(T<Args...>& v)
    {
        return std::get<I>(static_cast<std::tuple<Args...>&>(v));
    }

    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(const T<Args...>& v)
    {
        return std::get<I>(static_cast<const std::tuple<Args...>&>(v));
    }

    /***************************
     * to_array implementation *
     ***************************/

    namespace detail
    {
        template <class T, std::size_t N, std::size_t... I>
        constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>)
        {
            return {{a[I]...}};
        }
    }

    template <class T, std::size_t N>
    constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N])
    {
        return detail::to_array_impl(a, std::make_index_sequence<N>{});
    }
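
    // Usage sketch (illustrative): to_array converts a built-in array into a
    // std::array at compile time.
    //
    //   constexpr int raw[] = {1, 2, 3};
    //   constexpr auto arr = xt::to_array(raw);  // std::array<int, 3>{{1, 2, 3}}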

    /***********************************
     * has_storage_type implementation *
     ***********************************/

    template <class T, class = void>
    struct has_storage_type : std::false_type
    {
    };

    template <class T>
    struct xcontainer_inner_types;

    template <class T>
    struct has_storage_type<T, void_t<typename xcontainer_inner_types<T>::storage_type>>
        : xtl::negation<std::is_same<
              typename std::remove_cv<typename xcontainer_inner_types<T>::storage_type>::type,
              invalid_type>>
    {
    };

    /*************************************
     * has_data_interface implementation *
     *************************************/

    template <class E, class = void>
    struct has_data_interface : std::false_type
    {
    };

    template <class E>
    struct has_data_interface<E, void_t<decltype(std::declval<E>().data())>> : std::true_type
    {
    };

    template <class E, class = void>
    struct has_strides : std::false_type
    {
    };

    template <class E>
    struct has_strides<E, void_t<decltype(std::declval<E>().strides())>> : std::true_type
    {
    };

    template <class E, class = void>
    struct has_iterator_interface : std::false_type
    {
    };

    template <class E>
    struct has_iterator_interface<E, void_t<decltype(std::declval<E>().begin())>> : std::true_type
    {
    };

    /******************************
     * is_iterator implementation *
     ******************************/

    template <class E, class = void>
    struct is_iterator : std::false_type
    {
    };

    template <class E>
    struct is_iterator<
        E,
        void_t<decltype(
            *std::declval<const E>(),
            std::declval<const E>() == std::declval<const E>(),
            std::declval<const E>() != std::declval<const E>(),
            ++(*std::declval<E*>()),
            (*std::declval<E*>())++,
            std::true_type())>> : std::true_type
    {
    };

    /*****************************************************
     * xtrivially_default_constructible implementation *
     *****************************************************/

#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#else
#if defined(_GLIBCXX_USE_CXX11_ABI)
#if _GLIBCXX_USE_CXX11_ABI || (defined(_GLIBCXX_USE_DUAL_ABI) && !_GLIBCXX_USE_DUAL_ABI)
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#endif
#endif
#endif

#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(XTENSOR_GLIBCXX_USE_CXX11_ABI)

    template <class T>
    using xtrivially_default_constructible = std::is_trivially_default_constructible<T>;

#else

    template <class T>
    using xtrivially_default_constructible = std::has_trivial_default_constructor<T>;

#endif

#undef XTENSOR_GLIBCXX_USE_CXX11_ABI

    /************************************
     * conditional_cast implementation *
     ************************************/

    namespace detail
    {
        template <bool condition, class T>
        struct conditional_cast_functor;

        template <class T>
        struct conditional_cast_functor<false, T> : public xtl::identity
        {
        };

        template <class T>
        struct conditional_cast_functor<true, T>
        {
            template <class U>
            inline auto operator()(U&& u) const
            {
                return static_cast<T>(std::forward<U>(u));
            }
        };
    }

    /**
     * Perform a type cast when a condition is true.
     * If condition is true, return static_cast<T>(u); otherwise forward u unchanged.
     */
    template <bool condition, class T, class U>
    inline auto conditional_cast(U&& u)
    {
        return detail::conditional_cast_functor<condition, T>()(std::forward<U>(u));
    }
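
    // Usage sketch (illustrative): the cast is applied only when the
    // compile-time condition holds; otherwise the argument passes through.
    //
    //   auto a = xt::conditional_cast<true, double>(3);   // double{3.0}
    //   auto b = xt::conditional_cast<false, double>(3);  // stays int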

    /**********************
     * tracking allocator *
     **********************/

    namespace alloc_tracking
    {
        inline bool& enabled()
        {
            static bool enabled = false;
            return enabled;
        }

        inline void enable()
        {
            enabled() = true;
        }

        inline void disable()
        {
            enabled() = false;
        }

        enum policy
        {
            print,
            assert
        };
    }

    template <class T, class A, alloc_tracking::policy P>
    struct tracking_allocator : private A
    {
        using base_type = A;
        using value_type = typename A::value_type;
        using reference = value_type&;
        using const_reference = const value_type&;
        using pointer = typename std::allocator_traits<A>::pointer;
        using const_pointer = typename std::allocator_traits<A>::const_pointer;
        using size_type = typename std::allocator_traits<A>::size_type;
        using difference_type = typename std::allocator_traits<A>::difference_type;

        tracking_allocator() = default;

        T* allocate(std::size_t n)
        {
            if (alloc_tracking::enabled())
            {
                if (P == alloc_tracking::print)
                {
                    std::cout << "xtensor allocating: " << n << std::endl;
                }
                else if (P == alloc_tracking::assert)
                {
                    XTENSOR_THROW(
                        std::runtime_error,
                        "xtensor allocation of " + std::to_string(n) + " elements detected"
                    );
                }
            }
            return base_type::allocate(n);
        }

        using base_type::deallocate;

// construct and destroy were removed from allocators in C++20
#if ((defined(__cplusplus) && __cplusplus < 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG < 202002L))
        using base_type::construct;
        using base_type::destroy;
#endif

        template <class U>
        struct rebind
        {
            using traits = std::allocator_traits<A>;
            using other = tracking_allocator<U, typename traits::template rebind_alloc<U>, P>;
        };
    };

    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator==(const tracking_allocator<T, AT, PT>& a, const tracking_allocator<U, AU, PU>& b)
    {
        return std::is_same<AT, AU>::value;
    }

    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator!=(const tracking_allocator<T, AT, PT>& a, const tracking_allocator<U, AU, PU>& b)
    {
        return !(a == b);
    }
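
    // Usage sketch (illustrative): wrap an allocator to log or trap heap
    // allocations while tracking is enabled.
    //
    //   using alloc = xt::tracking_allocator<double, std::allocator<double>, xt::alloc_tracking::print>;
    //   xt::alloc_tracking::enable();
    //   std::vector<double, alloc> v(100);  // prints "xtensor allocating: 100"
    //   xt::alloc_tracking::disable();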

    /*****************
     * has_assign_to *
     *****************/

    template <class E1, class E2, class = void>
    struct has_assign_to : std::false_type
    {
    };

    template <class E1, class E2>
    struct has_assign_to<E1, E2, void_t<decltype(std::declval<const E2&>().assign_to(std::declval<E1&>()))>>
        : std::true_type
    {
    };

    /*************************************
     * overlapping_memory_checker_traits *
     *************************************/

    template <class T, class Enable = void>
    struct has_memory_address : std::false_type
    {
    };

    template <class T>
    struct has_memory_address<T, void_t<decltype(std::addressof(*std::declval<T>().begin()))>> : std::true_type
    {
    };

    struct memory_range
    {
        // Checking pointer overlap is more correct with integer values; see
        // https://devblogs.microsoft.com/oldnewthing/20170927-00/?p=97095
        const uintptr_t m_first = 0;
        const uintptr_t m_last = 0;

        explicit memory_range() = default;

        template <class T>
        explicit memory_range(T* first, T* last)
            : m_first(reinterpret_cast<uintptr_t>(last < first ? last : first))
            , m_last(reinterpret_cast<uintptr_t>(last < first ? first : last))
        {
        }

        template <class T>
        bool overlaps(T* first, T* last) const
        {
            if (first <= last)
            {
                return reinterpret_cast<uintptr_t>(first) <= m_last
                       && reinterpret_cast<uintptr_t>(last) >= m_first;
            }
            else
            {
                return reinterpret_cast<uintptr_t>(last) <= m_last
                       && reinterpret_cast<uintptr_t>(first) >= m_first;
            }
        }
    };
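
    // Usage sketch (illustrative): memory_range records a destination span and
    // answers overlap queries in integer address space.
    //
    //   double buf[8];
    //   xt::memory_range rng(buf, buf + 8);
    //   bool hit = rng.overlaps(buf + 4, buf + 6);    // true
    //   bool miss = rng.overlaps(buf + 9, buf + 12);  // false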

    template <class E, class Enable = void>
    struct overlapping_memory_checker_traits
    {
        static bool check_overlap(const E&, const memory_range&)
        {
            return true;
        }
    };

    template <class E>
    struct overlapping_memory_checker_traits<E, std::enable_if_t<has_memory_address<E>::value>>
    {
        static bool check_overlap(const E& expr, const memory_range& dst_range)
        {
            if (expr.size() == 0)
            {
                return false;
            }
            else
            {
                return dst_range.overlaps(std::addressof(*expr.begin()), std::addressof(*expr.rbegin()));
            }
        }
    };

    struct overlapping_memory_checker_base
    {
        memory_range m_dst_range;

        explicit overlapping_memory_checker_base() = default;

        explicit overlapping_memory_checker_base(memory_range dst_memory_range)
            : m_dst_range(std::move(dst_memory_range))
        {
        }

        template <class E>
        bool check_overlap(const E& expr) const
        {
            if (!m_dst_range.m_first || !m_dst_range.m_last)
            {
                return false;
            }
            else
            {
                return overlapping_memory_checker_traits<E>::check_overlap(expr, m_dst_range);
            }
        }
    };

    template <class Dst, class Enable = void>
    struct overlapping_memory_checker : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst&)
            : overlapping_memory_checker_base()
        {
        }
    };

    template <class Dst>
    struct overlapping_memory_checker<Dst, std::enable_if_t<has_memory_address<Dst>::value>>
        : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst& aDst)
            : overlapping_memory_checker_base(
                  [&]()
                  {
                      if (aDst.size() == 0)
                      {
                          return memory_range();
                      }
                      else
                      {
                          return memory_range(std::addressof(*aDst.begin()), std::addressof(*aDst.rbegin()));
                      }
                  }()
              )
        {
        }
    };

    template <class Dst>
    auto make_overlapping_memory_checker(const Dst& a_dst)
    {
        return overlapping_memory_checker<Dst>{a_dst};
    }
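
    // Usage sketch (illustrative): before assigning an expression into dst, an
    // algorithm can ask whether the right-hand side aliases the destination.
    //
    //   std::vector<double> dst(10);
    //   auto checker = xt::make_overlapping_memory_checker(dst);
    //   bool aliased = checker.check_overlap(dst);  // true: same buffer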

    /********************
     * rebind_container *
     ********************/

    template <class X, template <class, class> class C, class T, class A>
    struct rebind_container<X, C<T, A>>
    {
        using traits = std::allocator_traits<A>;
        using allocator = typename traits::template rebind_alloc<X>;
        using type = C<X, allocator>;
    };

// Workaround for rebind_container problems when the C++17 template template
// argument matching rules are in effect
#ifdef __cpp_template_template_args
    template <class X, class T, std::size_t N>
    struct rebind_container<X, std::array<T, N>>
    {
        using type = std::array<X, N>;
    };
#else
    template <class X, template <class, std::size_t> class C, class T, std::size_t N>
    struct rebind_container<X, C<T, N>>
    {
        using type = C<X, N>;
    };
#endif
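
    // Usage sketch (illustrative): rebind_container swaps the value type of a
    // container while preserving its shape.
    //
    //   static_assert(std::is_same<
    //       xt::rebind_container_t<double, std::vector<int>>,
    //       std::vector<double>>::value, "");
    //   static_assert(std::is_same<
    //       xt::rebind_container_t<double, std::array<int, 3>>,
    //       std::array<double, 3>>::value, "");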

    /********************
     * get_strides_type *
     ********************/

    template <class S>
    struct get_strides_type
    {
        using type = typename rebind_container<std::ptrdiff_t, S>::type;
    };

    template <std::size_t... I>
    struct get_strides_type<fixed_shape<I...>>
    {
        using type = std::array<std::ptrdiff_t, sizeof...(I)>;
    };

    template <class CP, class O, class A>
    class xbuffer_adaptor;

    template <class CP, class O, class A>
    struct get_strides_type<xbuffer_adaptor<CP, O, A>>
    {
        // Since we cannot create a buffer adaptor holding data, map it to a std::vector.
        using type = std::vector<
            typename xbuffer_adaptor<CP, O, A>::value_type,
            typename xbuffer_adaptor<CP, O, A>::allocator_type>;
    };

    template <class C>
    using get_strides_t = typename get_strides_type<C>::type;

    /*******************
     * inner_reference *
     *******************/

    template <class ST>
    struct inner_reference
    {
        using storage_type = std::decay_t<ST>;
        using type = std::conditional_t<
            std::is_const<std::remove_reference_t<ST>>::value,
            typename storage_type::const_reference,
            typename storage_type::reference>;
    };

    template <class ST>
    using inner_reference_t = typename inner_reference<ST>::type;

    /************
     * get_rank *
     ************/

    template <class E, typename = void>
    struct get_rank
    {
        static constexpr std::size_t value = SIZE_MAX;
    };

    template <class E>
    struct get_rank<E, decltype((void) E::rank, void())>
    {
        static constexpr std::size_t value = E::rank;
    };

    /******************
     * has_fixed_rank *
     ******************/

    template <class E>
    struct has_fixed_rank
    {
        using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value != SIZE_MAX>;
    };

    template <class E>
    using has_fixed_rank_t = typename has_fixed_rank<std::decay_t<E>>::type;
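
    // Usage sketch (illustrative, assuming a type exposing a static `rank`
    // member, as xtensor containers do): get_rank reports SIZE_MAX for
    // dynamic-rank types.
    //
    //   struct fixed2 { static constexpr std::size_t rank = 2; };
    //   static_assert(xt::get_rank<fixed2>::value == 2, "");
    //   static_assert(xt::get_rank<int>::value == SIZE_MAX, "");
    //   static_assert(xt::has_fixed_rank_t<fixed2>::value, "");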

    /************
     * has_rank *
     ************/

    template <class E, std::size_t N>
    struct has_rank
    {
        using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value == N>;
    };

    template <class E, std::size_t N>
    using has_rank_t = typename has_rank<std::decay_t<E>, N>::type;
}

#endif  // XTENSOR_UTILS_HPP