xtensor
 
Loading...
Searching...
No Matches
xutils.hpp
1/***************************************************************************
2 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht *
3 * Copyright (c) QuantStack *
4 * *
5 * Distributed under the terms of the BSD 3-Clause License. *
6 * *
7 * The full license is in the file LICENSE, distributed with this software. *
8 ****************************************************************************/
9
10#ifndef XTENSOR_UTILS_HPP
11#define XTENSOR_UTILS_HPP
12
13#include <algorithm>
14#include <array>
15#include <cmath>
16#include <complex>
17#include <cstddef>
18#include <initializer_list>
19#include <iostream>
20#include <memory>
21#include <tuple>
22#include <type_traits>
23#include <utility>
24#include <vector>
25
26#include <xtl/xfunctional.hpp>
27#include <xtl/xmeta_utils.hpp>
28#include <xtl/xsequence.hpp>
29#include <xtl/xtype_traits.hpp>
30
31#include "../core/xtensor_config.hpp"
32
33#if (defined(_MSC_VER) && _MSC_VER >= 1910)
34#define NOEXCEPT(T)
35#else
36#define NOEXCEPT(T) noexcept(T)
37#endif
38
39namespace xt
40{
41 /****************
42 * declarations *
43 ****************/
44
    // Strips the class from a pointer-to-member-function type; defined below.
    template <class T>
    struct remove_class;

    /*template <class F, class... T>
    void for_each(F&& f, std::tuple<T...>& t) noexcept(implementation_dependent);*/

    /*template <class F, class R, class... T>
    R accumulate(F&& f, R init, const std::tuple<T...>& t) noexcept(implementation_dependent);*/

    // Returns the I-th element of the argument pack, perfectly forwarded.
    template <std::size_t I, class... Args>
    constexpr decltype(auto) argument(Args&&... args) noexcept;

    // Calls func on the runtime-selected index-th tuple element, cast to R.
    template <class R, class F, class... S>
    R apply(std::size_t index, F&& func, const std::tuple<S...>& s) NOEXCEPT(noexcept(func(std::get<0>(s))));

    // Copies a scalar / (possibly nested) initializer list through an output iterator.
    template <class T, class S>
    void nested_copy(T&& iter, const S& s);

    template <class T, class S>
    void nested_copy(T&& iter, std::initializer_list<S> s);

    // Resizes c when the container supports it; returns whether c ends up
    // with the requested size.
    template <class C>
    bool resize_container(C& c, typename C::size_type size);

    template <class T, std::size_t N>
    bool resize_container(std::array<T, N>& a, typename std::array<T, N>::size_type size);

    // Compile-time fixed shape, declared here for the overload below.
    template <std::size_t... I>
    class fixed_shape;

    template <std::size_t... I>
    bool resize_container(fixed_shape<I...>& a, std::size_t size);
78 template <class X, class C>
80
81 template <class X, class C>
82 using rebind_container_t = typename rebind_container<X, C>::type;
83
84 std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis);
85
    // gcc 4.9 is affected by C++14 defect CGW 1558
    // see http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
    // Home-grown replacement for std::void_t that sidesteps the defect above:
    // the parameter pack is genuinely "used" by the class template.
    template <class... T>
    struct make_void
    {
        using type = void;
    };

    template <class... T>
    using void_t = typename make_void<T...>::type;
96
97 // This is used for non existent types (e.g. storage for some expressions
98 // like generators)
100 {
101 };
102
103 template <class... T>
105 {
106 using type = invalid_type;
107 };
108
    // SFINAE helper: yields R only when T is NOT integral. xtl's trait is
    // used instead of std::is_integral — presumably to also cover extended
    // integer types; confirm in xtl's xtype_traits.
    template <class T, class R>
    using disable_integral_t = std::enable_if_t<!xtl::is_integral<T>::value, R>;
111
112 /********************************
113 * meta identity implementation *
114 ********************************/
115
116 template <class T>
118 {
119 using type = T;
120 };
121
    /***************************************
     * is_specialization_of implementation *
     ***************************************/

    // Detects whether T is a specialization of the class template TT, e.g.
    // is_specialization_of<std::vector, std::vector<int>> is std::true_type.
    template <template <class...> class TT, class T>
    struct is_specialization_of : std::false_type
    {
    };

    template <template <class...> class TT, class... Ts>
    struct is_specialization_of<TT, TT<Ts...>> : std::true_type
    {
    };
135
136 /*******************************
137 * remove_class implementation *
138 *******************************/
139
140 template <class T>
142 {
143 };
144
145 template <class C, class R, class... Args>
146 struct remove_class<R (C::*)(Args...)>
147 {
148 typedef R type(Args...);
149 };
150
151 template <class C, class R, class... Args>
152 struct remove_class<R (C::*)(Args...) const>
153 {
154 typedef R type(Args...);
155 };
156
157 template <class T>
158 using remove_class_t = typename remove_class<T>::type;
159
160 /***************************
161 * for_each implementation *
162 ***************************/
163
164 namespace detail
165 {
166 template <class F, size_t... I, class... Ts>
167 void for_each(F&& f, std::tuple<Ts...>& t, std::index_sequence<I...>) noexcept(
168 (noexcept(f(std::get<I>(t))) && ...)
169 )
170 {
171 (f(std::get<I>(t)), ...);
172 }
173
174 template <class F, size_t... I, class... Ts>
175 void for_each(F&& f, const std::tuple<Ts...>& t, std::index_sequence<I...>) noexcept(
176 (noexcept(f(std::get<I>(t))) && ...)
177 )
178 {
179 (f(std::get<I>(t)), ...);
180 }
181 }
182
    // Applies f to every element of tuple t, in order.
    template <class F, class... Ts>
    inline void for_each(F&& f, std::tuple<Ts...>& t) noexcept(
        noexcept(detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{}))
    )
    {
        detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{});
    }

    // Const overload of the above.
    template <class F, class... Ts>
    inline void for_each(F&& f, const std::tuple<Ts...>& t) noexcept(
        noexcept(detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{}))
    )
    {
        detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{});
    }
198
199 /*****************************
200 * accumulate implementation *
201 *****************************/
202
204
205 namespace detail
206 {
207 template <class F, class R, class... T, size_t... I>
208 R accumulate_impl(F&& f, R init, const std::tuple<T...>& t, std::index_sequence<I...> /*I*/) noexcept(
209 (noexcept(f(init, std::get<I>(t))) && ...)
210 )
211 {
212 R res = init;
213 auto wrapper = [&](const auto& i, const auto& j)
214 {
215 res = f(i, j);
216 };
217 (wrapper(res, std::get<I>(t)), ...);
218 return res;
219 }
220 }
221
    // Accumulates the tuple elements with binary functor f starting from
    // init; evaluation order is strictly left-to-right.
    template <class F, class R, class... T>
    inline R accumulate(F&& f, R init, const std::tuple<T...>& t) noexcept(
        noexcept(detail::accumulate_impl(std::forward<F>(f), init, t, std::make_index_sequence<sizeof...(T)>{}))
    )
    {
        return detail::accumulate_impl(std::forward<F>(f), init, t, std::make_index_sequence<sizeof...(T)>{});
    }
229
231
232 /***************************
233 * argument implementation *
234 ***************************/
235
    namespace detail
    {
        // Recursive pack indexer: getter<I> drops the first argument and
        // recurses on the tail until I reaches 0.
        template <std::size_t I>
        struct getter
        {
            template <class Arg, class... Args>
            static constexpr decltype(auto) get(Arg&& /*arg*/, Args&&... args) noexcept
            {
                return getter<I - 1>::get(std::forward<Args>(args)...);
            }
        };

        // Base case: the first remaining argument is the one requested.
        template <>
        struct getter<0>
        {
            template <class Arg, class... Args>
            static constexpr Arg&& get(Arg&& arg, Args&&... /*args*/) noexcept
            {
                return std::forward<Arg>(arg);
            }
        };
    }
258
    // Returns the I-th argument of the pack, perfectly forwarded; rejects
    // out-of-range indices at compile time.
    template <std::size_t I, class... Args>
    constexpr decltype(auto) argument(Args&&... args) noexcept
    {
        static_assert(I < sizeof...(Args), "I should be lesser than sizeof...(Args)");
        return detail::getter<I>::get(std::forward<Args>(args)...);
    }
265
266 /************************
267 * apply implementation *
268 ************************/
269
    // Invokes func on the index-th tuple element, where index is only known
    // at run time, and converts the result to R. If index is out of range
    // (only reachable when assertions are disabled), a value-initialized R is
    // returned.
    template <class R, class F, class... S>
    inline R apply(std::size_t index, F&& func, const std::tuple<S...>& s)
        NOEXCEPT(noexcept(func(std::get<0>(s))))
    {
        XTENSOR_ASSERT(sizeof...(S) > index);
        return std::apply(
            [&](const S&... args) -> R
            {
                // Self-recursive generic lambda walking the pack until the
                // running position i equals the requested index.
                auto f_impl = [&](auto&& self, auto&& i, auto&& h, auto&&... t) -> R
                {
                    if (i == index)
                    {
                        return static_cast<R>(func(h));
                    }
                    if constexpr (sizeof...(t) > 0)
                    {
                        return self(self, std::size_t{i + 1}, t...);
                    }
                    return R{};
                };
                return f_impl(f_impl, std::size_t{0}, args...);
            },
            s
        );
    }
295
296 /***************************
297 * nested_initializer_list *
298 ***************************/
299
300 template <class T, std::size_t I>
302 {
303 using type = std::initializer_list<typename nested_initializer_list<T, I - 1>::type>;
304 };
305
306 template <class T>
308 {
309 using type = T;
310 };
311
312 template <class T, std::size_t I>
313 using nested_initializer_list_t = typename nested_initializer_list<T, I>::type;
314
315 /******************************
316 * nested_copy implementation *
317 ******************************/
318
319 template <class T, class S>
320 inline void nested_copy(T&& iter, const S& s)
321 {
322 *iter++ = s;
323 }
324
325 template <class T, class S>
326 inline void nested_copy(T&& iter, std::initializer_list<S> s)
327 {
328 for (auto it = s.begin(); it != s.end(); ++it)
329 {
330 nested_copy(std::forward<T>(iter), *it);
331 }
332 }
333
334 /***********************************
335 * resize_container implementation *
336 ***********************************/
    // Generic case: containers with a resize member can always match the
    // requested size.
    template <class C>
    inline bool resize_container(C& c, typename C::size_type size)
    {
        c.resize(size);
        return true;
    }

    // std::array cannot be resized; report whether the requested size already
    // equals the static extent.
    template <class T, std::size_t N>
    inline bool resize_container(std::array<T, N>& /*a*/, typename std::array<T, N>::size_type size)
    {
        return size == N;
    }

    // Likewise for compile-time fixed shapes: only the matching size succeeds.
    template <std::size_t... I>
    inline bool resize_container(xt::fixed_shape<I...>&, std::size_t size)
    {
        return sizeof...(I) == size;
    }
355
356 /*********************************
357 * normalize_axis implementation *
358 *********************************/
359
360 // scalar normalize axis
361 inline std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis)
362 {
363 return axis < 0 ? static_cast<std::size_t>(static_cast<std::ptrdiff_t>(dim) + axis)
364 : static_cast<std::size_t>(axis);
365 }
366
    // Container overload for signed axis collections: returns a new container
    // of the same kind (rebound to std::size_t) with every axis normalized
    // against expr's dimension.
    template <class E, class C>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && xtl::is_signed<typename std::decay_t<C>::value_type>::value,
        rebind_container_t<std::size_t, std::decay_t<C>>>
    normalize_axis(E& expr, C&& axes)
    {
        rebind_container_t<std::size_t, std::decay_t<C>> res;
        resize_container(res, axes.size());

        for (std::size_t i = 0; i < axes.size(); ++i)
        {
            res[i] = normalize_axis(expr.dimension(), axes[i]);
        }

        // Sanity check: every normalized axis must be in [0, dimension).
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }
392
    // Container overload for unsigned axis collections: nothing to normalize,
    // the input is validated and forwarded through unchanged.
    template <class C, class E>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && std::is_unsigned<typename std::decay_t<C>::value_type>::value,
        C&&>
    normalize_axis(E& expr, C&& axes)
    {
        // expr is only read inside the assertion below.
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            axes.begin(),
            axes.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::forward<C>(axes);
    }
410
    // Normalizes a range of signed axes into a freshly built container of
    // type R.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes)
        -> std::enable_if_t<xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value, R>
    {
        R res;
        xt::resize_container(res, std::size(axes));
        auto dim = expr.dimension();
        std::transform(
            std::begin(axes),
            std::end(axes),
            std::begin(res),
            [&dim](auto ax_el)
            {
                return normalize_axis(dim, ax_el);
            }
        );

        // Sanity check: every normalized axis must be in [0, dimension).
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }

    // Unsigned axes arriving in a type different from R: validate and copy
    // them into an R.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && !std::is_same<R, std::decay_t<C>>::value,
        R>
    {
        // expr is only read inside the assertion below.
        static_cast<void>(expr);

        R res;
        xt::resize_container(res, std::size(axes));
        std::copy(std::begin(axes), std::end(axes), std::begin(res));
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return res;
    }

    // Unsigned axes already stored in an R: validate and pass them through by
    // move, avoiding any copy.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && std::is_same<R, std::decay_t<C>>::value,
        R&&>
    {
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            std::begin(axes),
            std::end(axes),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::move(axes);
    }
477
478 /******************
479 * get_value_type *
480 ******************/
481
482 template <class T, class = void_t<>>
484 {
485 using type = T;
486 };
487
488 template <class T>
489 struct get_value_type<T, void_t<typename T::value_type>>
490 {
491 using type = typename T::value_type;
492 };
493
494 template <class T>
495 using get_value_type_t = typename get_value_type<T>::type;
496
497 /**********************
498 * get implementation *
499 **********************/
500
501 // When subclassing from std::tuple not all compilers are able to correctly instantiate get
502 // See here: https://stackoverflow.com/a/37188019/2528668
503 template <std::size_t I, template <typename... Args> class T, typename... Args>
504 decltype(auto) get(T<Args...>&& v)
505 {
506 return std::get<I>(static_cast<std::tuple<Args...>&&>(v));
507 }
508
509 template <std::size_t I, template <typename... Args> class T, typename... Args>
510 decltype(auto) get(T<Args...>& v)
511 {
512 return std::get<I>(static_cast<std::tuple<Args...>&>(v));
513 }
514
515 template <std::size_t I, template <typename... Args> class T, typename... Args>
516 decltype(auto) get(const T<Args...>& v)
517 {
518 return std::get<I>(static_cast<const std::tuple<Args...>&>(v));
519 }
520
521 /**************************
522 * to_array implementation *
523 ***************************/
524
525 namespace detail
526 {
527 template <class T, std::size_t N, std::size_t... I>
528 constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>)
529 {
530 return {{a[I]...}};
531 }
532 }
533
534 template <class T, std::size_t N>
535 constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N])
536 {
537 return detail::to_array_impl(a, std::make_index_sequence<N>{});
538 }
539
540 /***********************************
541 * has_storage_type implementation *
542 ***********************************/
543
544 template <class T, class = void>
545 struct has_storage_type : std::false_type
546 {
547 };
548
549 template <class T>
551
552 template <class T>
553 struct has_storage_type<T, void_t<typename xcontainer_inner_types<T>::storage_type>>
554 : std::negation<
555 std::is_same<typename std::remove_cv<typename xcontainer_inner_types<T>::storage_type>::type, invalid_type>>
556 {
557 };
558
559 /*************************************
560 * has_data_interface implementation *
561 *************************************/
562
    // True when E exposes a data() member, i.e. direct raw-buffer access.
    template <class E, class = void>
    struct has_data_interface : std::false_type
    {
    };

    template <class E>
    struct has_data_interface<E, void_t<decltype(std::declval<E>().data())>> : std::true_type
    {
    };

    // NOTE(review): the declaration that followed this template header was
    // lost during extraction (presumably an alias or concept built on
    // has_data_interface) — recover it from the upstream xtensor sources.
    template <class E>

    // True when E exposes a strides() member.
    template <class E, class = void>
    struct has_strides : std::false_type
    {
    };

    template <class E>
    struct has_strides<E, void_t<decltype(std::declval<E>().strides())>> : std::true_type
    {
    };

    // True when E exposes a begin() member.
    template <class E, class = void>
    struct has_iterator_interface : std::false_type
    {
    };

    template <class E>
    struct has_iterator_interface<E, void_t<decltype(std::declval<E>().begin())>> : std::true_type
    {
    };

    // NOTE(review): the declaration that followed this template header was
    // also lost during extraction — recover it from the upstream sources.
    template <class E>
599 /******************************
600 * is_iterator implementation *
601 ******************************/
602
603 template <class E, class = void>
604 struct is_iterator : std::false_type
605 {
606 };
607
608 template <class E>
610 E,
611 void_t<
612 decltype(*std::declval<const E>(), std::declval<const E>() == std::declval<const E>(), std::declval<const E>() != std::declval<const E>(), ++(*std::declval<E*>()), (*std::declval<E*>())++, std::true_type())>>
613 : std::true_type
614 {
615 };
616
    // NOTE(review): the declaration that followed this template header was
    // lost during extraction (presumably an is_iterator-based alias or
    // concept) — restore it from the upstream xtensor sources.
    template <typename E>
620 /********************************************
621 * xtrivial_default_construct implemenation *
622 ********************************************/
623
#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7
// has_trivial_default_constructor has not been available since libstdc++-7.
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#else
#if defined(_GLIBCXX_USE_CXX11_ABI)
#if _GLIBCXX_USE_CXX11_ABI || (defined(_GLIBCXX_USE_DUAL_ABI) && !_GLIBCXX_USE_DUAL_ABI)
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#endif
#endif
#endif

// Select the standard trait when available; fall back to the legacy
// libstdc++ intrinsic trait on old pre-C++11-ABI GNU toolchains.
#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(XTENSOR_GLIBCXX_USE_CXX11_ABI)

    template <class T>
    using xtrivially_default_constructible = std::is_trivially_default_constructible<T>;

#else

    template <class T>
    using xtrivially_default_constructible = std::has_trivial_default_constructor<T>;

#endif
// The helper macro is file-local; discard it immediately.
#undef XTENSOR_GLIBCXX_USE_CXX11_ABI
647
648 /*************************
649 * conditional type cast *
650 *************************/
651
652 template <bool condition, class T>
654
655 template <class T>
656 struct conditional_cast_functor<false, T> : public xtl::identity
657 {
658 };
659
660 template <class T>
662 {
663 template <class U>
664 inline auto operator()(U&& u) const
665 {
666 return static_cast<T>(std::forward<U>(u));
667 }
668 };
669
    /**
     * @brief Perform a type cast when a condition is true.
     *
     * If condition is true, u is cast to T via
     * conditional_cast_functor<true, T>; otherwise u is forwarded through
     * unchanged.
     *
     * @tparam condition Whether the cast is performed.
     * @tparam T The target type of the cast.
     */
    template <bool condition, class T, class U>
    inline auto conditional_cast(U&& u)
    {
        return conditional_cast_functor<condition, T>()(std::forward<U>(u));
    }
683
684 /**********************
685 * tracking allocator *
686 **********************/
687
688 namespace alloc_tracking
689 {
690 inline bool& enabled()
691 {
692 static bool enabled;
693 return enabled;
694 }
695
696 inline void enable()
697 {
698 enabled() = true;
699 }
700
701 inline void disable()
702 {
703 enabled() = false;
704 }
705
706 enum policy
707 {
708 print,
709 assert
710 };
711 }
712
    // Allocator adaptor that reports (policy `print`) or throws on (policy
    // `assert`) every allocation made while alloc_tracking::enabled() is set;
    // presumably used to detect unexpected allocations — confirm at call sites.
    template <class T, class A, alloc_tracking::policy P>
    struct tracking_allocator : private A
    {
        using base_type = A;
        using value_type = typename A::value_type;
        using reference = value_type&;
        using const_reference = const value_type&;
        using pointer = typename std::allocator_traits<A>::pointer;
        using const_pointer = typename std::allocator_traits<A>::const_pointer;
        using size_type = typename std::allocator_traits<A>::size_type;
        using difference_type = typename std::allocator_traits<A>::difference_type;

        tracking_allocator() = default;

        // Reports according to P when tracking is enabled, then forwards to
        // the wrapped allocator.
        T* allocate(std::size_t n)
        {
            if (alloc_tracking::enabled())
            {
                if (P == alloc_tracking::print)
                {
                    std::cout << "xtensor allocating: " << n << "" << std::endl;
                }
                else if (P == alloc_tracking::assert)
                {
                    XTENSOR_THROW(
                        std::runtime_error,
                        "xtensor allocation of " + std::to_string(n) + " elements detected"
                    );
                }
            }
            return base_type::allocate(n);
        }

        using base_type::deallocate;

// Construct and destroy are removed in --std=c++-20
#if ((defined(__cplusplus) && __cplusplus < 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG < 202002L))
        using base_type::construct;
        using base_type::destroy;
#endif

        // Rebinding keeps the tracking wrapper around the rebound allocator.
        template <class U>
        struct rebind
        {
            using traits = std::allocator_traits<A>;
            using other = tracking_allocator<U, typename traits::template rebind_alloc<U>, P>;
        };
    };
761
    // Two tracking allocators compare equal when the wrapped allocator types
    // match. NOTE(review): this assumes the wrapped allocators are stateless —
    // confirm for any stateful allocator use.
    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator==(const tracking_allocator<T, AT, PT>&, const tracking_allocator<U, AU, PU>&)
    {
        return std::is_same<AT, AU>::value;
    }

    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator!=(const tracking_allocator<T, AT, PT>& a, const tracking_allocator<U, AU, PU>& b)
    {
        return !(a == b);
    }
773
774 /*****************
775 * has_assign_to *
776 *****************/
777
    // Detects whether a const E2 provides a member assign_to(E1&).
    template <class E1, class E2, class = void>
    struct has_assign_to : std::false_type
    {
    };

    template <class E1, class E2>
    struct has_assign_to<E1, E2, void_t<decltype(std::declval<const E2&>().assign_to(std::declval<E1&>()))>>
        : std::true_type
    {
    };

    template <class E1, class E2>
    constexpr bool has_assign_to_v = has_assign_to<E1, E2>::value;
791
792 /*************************************
793 * overlapping_memory_checker_traits *
794 *************************************/
795
    // True when the address of T's first element can be taken, i.e. the
    // expression's storage can be inspected for aliasing.
    template <class T, class Enable = void>
    struct has_memory_address : std::false_type
    {
    };

    template <class T>
    struct has_memory_address<T, void_t<decltype(std::addressof(*std::declval<T>().begin()))>> : std::true_type
    {
    };

    // NOTE(review): the two declarations that followed these template headers
    // were lost during extraction (presumably convenience aliases/concepts
    // over has_memory_address) — recover them from the upstream sources.
    template <typename T>
    template <typename T>

    // Closed address interval [m_first, m_last] covered by some storage.
    // A default-constructed range has both bounds zero, i.e. is "empty".
    struct memory_range
    {
        // Checking pointer overlap is more correct in integer values,
        // for more explanation check https://devblogs.microsoft.com/oldnewthing/20170927-00/?p=97095
        const uintptr_t m_first = 0;
        const uintptr_t m_last = 0;

        explicit memory_range() = default;

        // Normalizes the bounds so that m_first <= m_last even when the
        // arguments are passed in reverse order.
        template <class T>
        explicit memory_range(T* first, T* last)
            : m_first(reinterpret_cast<uintptr_t>(last < first ? last : first))
            , m_last(reinterpret_cast<uintptr_t>(last < first ? first : last))
        {
        }

        // True when the closed interval spanned by first/last (in either
        // order) intersects this range.
        template <class T>
        bool overlaps(T* first, T* last) const
        {
            if (first <= last)
            {
                return reinterpret_cast<uintptr_t>(first) <= m_last
                       && reinterpret_cast<uintptr_t>(last) >= m_first;
            }
            else
            {
                return reinterpret_cast<uintptr_t>(last) <= m_last
                       && reinterpret_cast<uintptr_t>(first) >= m_first;
            }
        }
    };
842
843 template <class E, class Enable = void>
845 {
846 static bool check_overlap(const E&, const memory_range&)
847 {
848 return true;
849 }
850 };
851
    // When E's memory is addressable, compare its first/last element
    // addresses against the destination range.
    template <class E>
    struct overlapping_memory_checker_traits<E, std::enable_if_t<has_memory_address<E>::value>>
    {
        static bool check_overlap(const E& expr, const memory_range& dst_range)
        {
            // An empty expression cannot alias anything.
            if (expr.size() == 0)
            {
                return false;
            }
            else
            {
                // rbegin() addresses the last element, so the interval spans
                // the whole storage.
                return dst_range.overlaps(std::addressof(*expr.begin()), std::addressof(*expr.rbegin()));
            }
        }
    };
867
    // Stores the destination's memory range and answers, per expression,
    // whether that expression may alias the destination.
    struct overlapping_memory_checker_base
    {
        memory_range m_dst_range;

        explicit overlapping_memory_checker_base() = default;

        explicit overlapping_memory_checker_base(memory_range dst_memory_range)
            : m_dst_range(std::move(dst_memory_range))
        {
        }

        template <class E>
        bool check_overlap(const E& expr) const
        {
            // An unset/empty destination range can never overlap anything.
            if (!m_dst_range.m_first || !m_dst_range.m_last)
            {
                return false;
            }
            else
            {
                return overlapping_memory_checker_traits<E>::check_overlap(expr, m_dst_range);
            }
        }
    };
892
    // Default checker: the destination's memory is not addressable, so the
    // stored range stays empty and check_overlap always answers false.
    template <class Dst, class Enable = void>
    struct overlapping_memory_checker : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst&)
            : overlapping_memory_checker_base()
        {
        }
    };

    // Addressable destinations record their full begin/rbegin address range
    // at construction time.
    template <class Dst>
    struct overlapping_memory_checker<Dst, std::enable_if_t<has_memory_address<Dst>::value>>
        : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst& aDst)
            : overlapping_memory_checker_base(
                  // Immediately-invoked lambda so the base can be constructed
                  // from either an empty or a populated range.
                  [&]()
                  {
                      if (aDst.size() == 0)
                      {
                          return memory_range();
                      }
                      else
                      {
                          return memory_range(std::addressof(*aDst.begin()), std::addressof(*aDst.rbegin()));
                      }
                  }()
              )
        {
        }
    };
923
924 template <class Dst>
925 auto make_overlapping_memory_checker(const Dst& a_dst)
926 {
928 }
929
930 /********************
931 * rebind_container *
932 ********************/
933
    // Rebinds an allocator-aware container C<T, A> to value type X, rebinding
    // the allocator along with it.
    template <class X, template <class, class> class C, class T, class A>
    struct rebind_container<X, C<T, A>>
    {
        using traits = std::allocator_traits<A>;
        using allocator = typename traits::template rebind_alloc<X>;
        using type = C<X, allocator>;
    };

// Workaround for rebind_container problems when C++17 feature is enabled
#ifdef __cpp_template_template_args
    // std::array is handled explicitly: its second parameter is a size, not
    // an allocator.
    template <class X, class T, std::size_t N>
    struct rebind_container<X, std::array<T, N>>
    {
        using type = std::array<X, N>;
    };
#else
    // Generic form for containers parameterized on <type, size>.
    template <class X, template <class, std::size_t> class C, class T, std::size_t N>
    struct rebind_container<X, C<T, N>>
    {
        using type = C<X, N>;
    };
#endif
956
957 /********************
958 * get_strides_type *
959 ********************/
960
961 template <class S>
963 {
964 using type = typename rebind_container<std::ptrdiff_t, S>::type;
965 };
966
967 template <std::size_t... I>
969 {
970 // TODO we could compute the strides statically here.
971 // But we'll need full constexpr support to have a
972 // homogenous ``compute_strides`` method
973 using type = std::array<std::ptrdiff_t, sizeof...(I)>;
974 };
975
976 template <class CP, class O, class A>
977 class xbuffer_adaptor;
978
979 template <class CP, class O, class A>
981 {
982 // In bindings this mapping is called by reshape_view with an inner shape of type
983 // xbuffer_adaptor.
984 // Since we cannot create a buffer adaptor holding data, we map it to an std::vector.
985 using type = std::vector<
986 typename xbuffer_adaptor<CP, O, A>::value_type,
987 typename xbuffer_adaptor<CP, O, A>::allocator_type>;
988 };
989
990
991 template <class C>
992 using get_strides_t = typename get_strides_type<C>::type;
993
994 /*******************
995 * inner_reference *
996 *******************/
997
998 template <class ST>
1000 {
1001 using storage_type = std::decay_t<ST>;
1002 using type = std::conditional_t<
1003 std::is_const<std::remove_reference_t<ST>>::value,
1004 typename storage_type::const_reference,
1005 typename storage_type::reference>;
1006 };
1007
1008 template <class ST>
1009 using inner_reference_t = typename inner_reference<ST>::type;
1010
1011 /************
1012 * get_rank *
1013 ************/
1014
1015 template <class E, typename = void>
1017 {
1018 static constexpr std::size_t value = SIZE_MAX;
1019 };
1020
1021 template <class E>
1022 struct get_rank<E, decltype((void) E::rank, void())>
1023 {
1024 static constexpr std::size_t value = E::rank;
1025 };
1026
1027 /******************
1028 * has_fixed_rank *
1029 ******************/
1030
1031 template <class E>
1033 {
1034 using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value != SIZE_MAX>;
1035 };
1036
1037 template <class E>
1038 using has_fixed_rank_t = typename has_fixed_rank<std::decay_t<E>>::type;
1039
1040 /************
1041 * has_rank *
1042 ************/
1043
1044 template <class E, size_t N>
1046 {
1047 using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value == N>;
1048 };
1049
1050 template <class E, size_t N>
1051 using has_rank_t = typename has_rank<std::decay_t<E>, N>::type;
1052
1053}
1054
1055#endif
Fixed shape implementation for compile time defined arrays.
auto arg(E &&e) noexcept
Calculates the phase angle (in radians) elementwise for the complex numbers in e.
Definition xcomplex.hpp:221
auto strides(const E &e, stride_type type=stride_type::normal) noexcept
Get strides of an object.
Definition xstrides.hpp:248
standard mathematical functions for xexpressions
auto accumulate(F &&f, E &&e, EVS evaluation_strategy=EVS())
Accumulate and flatten array NOTE This function is not lazy!
auto conditional_cast(U &&u)
Perform a type cast when a condition is true.
Definition xutils.hpp:679