xtensor API reference — xutils.hpp
1/***************************************************************************
2 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht *
3 * Copyright (c) QuantStack *
4 * *
5 * Distributed under the terms of the BSD 3-Clause License. *
6 * *
7 * The full license is in the file LICENSE, distributed with this software. *
8 ****************************************************************************/
9
10#ifndef XTENSOR_UTILS_HPP
11#define XTENSOR_UTILS_HPP
12
13#include <algorithm>
14#include <array>
15#include <cmath>
16#include <complex>
17#include <cstddef>
18#include <initializer_list>
19#include <iostream>
20#include <memory>
21#include <tuple>
22#include <type_traits>
23#include <utility>
24#include <vector>
25
26#include <xtl/xfunctional.hpp>
27#include <xtl/xmeta_utils.hpp>
28#include <xtl/xsequence.hpp>
29#include <xtl/xtype_traits.hpp>
30
31#include "../core/xtensor_config.hpp"
32
// Conditional-noexcept helper: expands to nothing on MSVC 2017+ and to
// noexcept(T) elsewhere.
// NOTE(review): presumably a workaround for MSVC issues with expression
// SFINAE inside noexcept specifications — confirm against MSVC history.
#if (defined(_MSC_VER) && _MSC_VER >= 1910)
#define NOEXCEPT(T)
#else
#define NOEXCEPT(T) noexcept(T)
#endif
38
39namespace xt
40{
41 /****************
42 * declarations *
43 ****************/
44
45 template <class T>
46 struct remove_class;
47
48 /*template <class F, class... T>
49 void for_each(F&& f, std::tuple<T...>& t) noexcept(implementation_dependent);*/
50
51 /*template <class F, class R, class... T>
52 R accumulate(F&& f, R init, const std::tuple<T...>& t) noexcept(implementation_dependent);*/
53
54 template <std::size_t I, class... Args>
55 constexpr decltype(auto) argument(Args&&... args) noexcept;
56
57 template <class R, class F, class... S>
58 R apply(std::size_t index, F&& func, const std::tuple<S...>& s) NOEXCEPT(noexcept(func(std::get<0>(s))));
59
60 template <class T, class S>
61 void nested_copy(T&& iter, const S& s);
62
63 template <class T, class S>
64 void nested_copy(T&& iter, std::initializer_list<S> s);
65
66 template <class C>
67 bool resize_container(C& c, typename C::size_type size);
68
69 template <class T, std::size_t N>
70 bool resize_container(std::array<T, N>& a, typename std::array<T, N>::size_type size);
71
72 template <std::size_t... I>
73 class fixed_shape;
74
75 template <std::size_t... I>
76 bool resize_container(fixed_shape<I...>& a, std::size_t size);
77
78 template <class X, class C>
80
81 template <class X, class C>
82 using rebind_container_t = typename rebind_container<X, C>::type;
83
84 std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis);
85
86 // gcc 4.9 is affected by C++14 defect CGW 1558
87 // see http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
88 template <class... T>
89 struct make_void
90 {
91 using type = void;
92 };
93
94 template <class... T>
95 using void_t = typename make_void<T...>::type;
96
97 // This is used for non existent types (e.g. storage for some expressions
98 // like generators)
100 {
101 };
102
103 template <class... T>
105 {
106 using type = invalid_type;
107 };
108
109 template <class T, class R>
110 using disable_integral_t = std::enable_if_t<!xtl::is_integral<T>::value, R>;
111
112 /********************************
113 * meta identity implementation *
114 ********************************/
115
116 template <class T>
118 {
119 using type = T;
120 };
121
122 /***************************************
123 * is_specialization_of implementation *
124 ***************************************/
125
126 template <template <class...> class TT, class T>
127 struct is_specialization_of : std::false_type
128 {
129 };
130
131 template <template <class...> class TT, class... Ts>
132 struct is_specialization_of<TT, TT<Ts...>> : std::true_type
133 {
134 };
135
136 /*******************************
137 * remove_class implementation *
138 *******************************/
139
140 template <class T>
142 {
143 };
144
145 template <class C, class R, class... Args>
146 struct remove_class<R (C::*)(Args...)>
147 {
148 typedef R type(Args...);
149 };
150
151 template <class C, class R, class... Args>
152 struct remove_class<R (C::*)(Args...) const>
153 {
154 typedef R type(Args...);
155 };
156
157 template <class T>
158 using remove_class_t = typename remove_class<T>::type;
159
160 /***************************
161 * for_each implementation *
162 ***************************/
163
164 namespace detail
165 {
166 template <std::size_t I, class F, class... T>
167 inline typename std::enable_if<I == sizeof...(T), void>::type
168 for_each_impl(F&& /*f*/, std::tuple<T...>& /*t*/) noexcept
169 {
170 }
171
172 template <std::size_t I, class F, class... T>
173 inline typename std::enable_if < I<sizeof...(T), void>::type
174 for_each_impl(F&& f, std::tuple<T...>& t) noexcept(noexcept(f(std::get<I>(t))))
175 {
176 f(std::get<I>(t));
177 for_each_impl<I + 1, F, T...>(std::forward<F>(f), t);
178 }
179 }
180
181 template <class F, class... T>
182 inline void for_each(F&& f, std::tuple<T...>& t) noexcept(
183 noexcept(detail::for_each_impl<0, F, T...>(std::forward<F>(f), t))
184 )
185 {
186 detail::for_each_impl<0, F, T...>(std::forward<F>(f), t);
187 }
188
189 namespace detail
190 {
191 template <std::size_t I, class F, class... T>
192 inline typename std::enable_if<I == sizeof...(T), void>::type
193 for_each_impl(F&& /*f*/, const std::tuple<T...>& /*t*/) noexcept
194 {
195 }
196
197 template <std::size_t I, class F, class... T>
198 inline typename std::enable_if < I<sizeof...(T), void>::type
199 for_each_impl(F&& f, const std::tuple<T...>& t) noexcept(noexcept(f(std::get<I>(t))))
200 {
201 f(std::get<I>(t));
202 for_each_impl<I + 1, F, T...>(std::forward<F>(f), t);
203 }
204 }
205
206 template <class F, class... T>
207 inline void for_each(F&& f, const std::tuple<T...>& t) noexcept(
208 noexcept(detail::for_each_impl<0, F, T...>(std::forward<F>(f), t))
209 )
210 {
211 detail::for_each_impl<0, F, T...>(std::forward<F>(f), t);
212 }
213
214 /*****************************
215 * accumulate implementation *
216 *****************************/
217
219
220 namespace detail
221 {
222 template <std::size_t I, class F, class R, class... T>
223 inline std::enable_if_t<I == sizeof...(T), R>
224 accumulate_impl(F&& /*f*/, R init, const std::tuple<T...>& /*t*/) noexcept
225 {
226 return init;
227 }
228
229 template <std::size_t I, class F, class R, class... T>
230 inline std::enable_if_t < I<sizeof...(T), R>
231 accumulate_impl(F&& f, R init, const std::tuple<T...>& t) noexcept(noexcept(f(init, std::get<I>(t))))
232 {
233 R res = f(init, std::get<I>(t));
234 return accumulate_impl<I + 1, F, R, T...>(std::forward<F>(f), res, t);
235 }
236 }
237
238 template <class F, class R, class... T>
239 inline R accumulate(F&& f, R init, const std::tuple<T...>& t) noexcept(
240 noexcept(detail::accumulate_impl<0, F, R, T...>(std::forward<F>(f), init, t))
241 )
242 {
243 return detail::accumulate_impl<0, F, R, T...>(std::forward<F>(f), init, t);
244 }
245
247
248 /***************************
249 * argument implementation *
250 ***************************/
251
252 namespace detail
253 {
254 template <std::size_t I>
255 struct getter
256 {
257 template <class Arg, class... Args>
258 static constexpr decltype(auto) get(Arg&& /*arg*/, Args&&... args) noexcept
259 {
260 return getter<I - 1>::get(std::forward<Args>(args)...);
261 }
262 };
263
264 template <>
265 struct getter<0>
266 {
267 template <class Arg, class... Args>
268 static constexpr Arg&& get(Arg&& arg, Args&&... /*args*/) noexcept
269 {
270 return std::forward<Arg>(arg);
271 }
272 };
273 }
274
275 template <std::size_t I, class... Args>
276 constexpr decltype(auto) argument(Args&&... args) noexcept
277 {
278 static_assert(I < sizeof...(Args), "I should be lesser than sizeof...(Args)");
279 return detail::getter<I>::get(std::forward<Args>(args)...);
280 }
281
282 /************************
283 * apply implementation *
284 ************************/
285
    // Calls `func` on the tuple element at the runtime position `index` and
    // returns the result converted to R. If `index` is out of range the debug
    // assertion fires and a value-initialized R{} is returned.
    template <class R, class F, class... S>
    inline R apply(std::size_t index, F&& func, const std::tuple<S...>& s)
        NOEXCEPT(noexcept(func(std::get<0>(s))))
    {
        XTENSOR_ASSERT(sizeof...(S) > index);
        return std::apply(
            [&](const S&... args) -> R
            {
                // Recursive generic lambda: walks the unpacked elements until
                // the runtime index is reached. Passing the lambda to itself
                // ("self") enables recursion without C++23 deducing-this.
                auto f_impl = [&](auto&& self, auto&& i, auto&& h, auto&&... t) -> R
                {
                    if (i == index)
                    {
                        return static_cast<R>(func(h));
                    }
                    if constexpr (sizeof...(t) > 0)
                    {
                        return self(self, std::size_t{i + 1}, t...);
                    }
                    return R{};
                };
                return f_impl(f_impl, std::size_t{0}, args...);
            },
            s
        );
    }
311
312 /***************************
313 * nested_initializer_list *
314 ***************************/
315
316 template <class T, std::size_t I>
318 {
319 using type = std::initializer_list<typename nested_initializer_list<T, I - 1>::type>;
320 };
321
322 template <class T>
324 {
325 using type = T;
326 };
327
328 template <class T, std::size_t I>
329 using nested_initializer_list_t = typename nested_initializer_list<T, I>::type;
330
331 /******************************
332 * nested_copy implementation *
333 ******************************/
334
335 template <class T, class S>
336 inline void nested_copy(T&& iter, const S& s)
337 {
338 *iter++ = s;
339 }
340
341 template <class T, class S>
342 inline void nested_copy(T&& iter, std::initializer_list<S> s)
343 {
344 for (auto it = s.begin(); it != s.end(); ++it)
345 {
346 nested_copy(std::forward<T>(iter), *it);
347 }
348 }
349
350 /***********************************
351 * resize_container implementation *
352 ***********************************/
353 template <class C>
354 inline bool resize_container(C& c, typename C::size_type size)
355 {
356 c.resize(size);
357 return true;
358 }
359
360 template <class T, std::size_t N>
361 inline bool resize_container(std::array<T, N>& /*a*/, typename std::array<T, N>::size_type size)
362 {
363 return size == N;
364 }
365
    // fixed_shape is compile-time sized: succeed only if the requested size
    // equals the number of static dimensions.
    template <std::size_t... I>
    inline bool resize_container(xt::fixed_shape<I...>&, std::size_t size)
    {
        return sizeof...(I) == size;
    }
371
372 /*********************************
373 * normalize_axis implementation *
374 *********************************/
375
376 // scalar normalize axis
377 inline std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis)
378 {
379 return axis < 0 ? static_cast<std::size_t>(static_cast<std::ptrdiff_t>(dim) + axis)
380 : static_cast<std::size_t>(axis);
381 }
382
    // Container overload for signed axis indices: returns a new container of
    // std::size_t in which every (possibly negative) axis has been normalized
    // against expr.dimension(). Debug builds assert all results are in range.
    template <class E, class C>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && xtl::is_signed<typename std::decay_t<C>::value_type>::value,
        rebind_container_t<std::size_t, std::decay_t<C>>>
    normalize_axis(E& expr, C&& axes)
    {
        // Same container type as the input, rebound to std::size_t.
        rebind_container_t<std::size_t, std::decay_t<C>> res;
        resize_container(res, axes.size());

        for (std::size_t i = 0; i < axes.size(); ++i)
        {
            res[i] = normalize_axis(expr.dimension(), axes[i]);
        }

        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }
408
    // Unsigned axis indices need no normalization: forward the container
    // unchanged. Debug builds still assert every axis is in range.
    template <class C, class E>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && std::is_unsigned<typename std::decay_t<C>::value_type>::value,
        C&&>
    normalize_axis(E& expr, C&& axes)
    {
        // expr is only read by the debug assertion below.
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            axes.begin(),
            axes.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::forward<C>(axes);
    }
426
    // forward_normalize: normalizes signed axes into a freshly built container
    // of caller-chosen type R.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes)
        -> std::enable_if_t<xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value, R>
    {
        R res;
        xt::resize_container(res, std::size(axes));
        // Hoist the dimension lookup out of the per-axis transform.
        auto dim = expr.dimension();
        std::transform(
            std::begin(axes),
            std::end(axes),
            std::begin(res),
            [&dim](auto ax_el)
            {
                return normalize_axis(dim, ax_el);
            }
        );

        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }
455
    // Unsigned axes stored in a container type different from R: copy the
    // values element-wise into a new R (no normalization needed).
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && !std::is_same<R, std::decay_t<C>>::value,
        R>
    {
        // expr is only read by the debug assertion below.
        static_cast<void>(expr);

        R res;
        xt::resize_container(res, std::size(axes));
        std::copy(std::begin(axes), std::end(axes), std::begin(res));
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return res;
    }
476
    // Unsigned axes already held in the requested container type R: move the
    // argument through without copying.
    // NOTE(review): returns R&& bound to the caller's argument — the result
    // must be consumed within the full expression; confirm callers do so.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && std::is_same<R, std::decay_t<C>>::value,
        R&&>
    {
        // expr is only read by the debug assertion below.
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            std::begin(axes),
            std::end(axes),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::move(axes);
    }
493
494 /******************
495 * get_value_type *
496 ******************/
497
498 template <class T, class = void_t<>>
500 {
501 using type = T;
502 };
503
504 template <class T>
505 struct get_value_type<T, void_t<typename T::value_type>>
506 {
507 using type = typename T::value_type;
508 };
509
510 template <class T>
511 using get_value_type_t = typename get_value_type<T>::type;
512
513 /**********************
514 * get implementation *
515 **********************/
516
517 // When subclassing from std::tuple not all compilers are able to correctly instantiate get
518 // See here: https://stackoverflow.com/a/37188019/2528668
519 template <std::size_t I, template <typename... Args> class T, typename... Args>
520 decltype(auto) get(T<Args...>&& v)
521 {
522 return std::get<I>(static_cast<std::tuple<Args...>&&>(v));
523 }
524
525 template <std::size_t I, template <typename... Args> class T, typename... Args>
526 decltype(auto) get(T<Args...>& v)
527 {
528 return std::get<I>(static_cast<std::tuple<Args...>&>(v));
529 }
530
531 template <std::size_t I, template <typename... Args> class T, typename... Args>
532 decltype(auto) get(const T<Args...>& v)
533 {
534 return std::get<I>(static_cast<const std::tuple<Args...>&>(v));
535 }
536
537 /**************************
538 * to_array implementation *
539 ***************************/
540
541 namespace detail
542 {
543 template <class T, std::size_t N, std::size_t... I>
544 constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>)
545 {
546 return {{a[I]...}};
547 }
548 }
549
550 template <class T, std::size_t N>
551 constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N])
552 {
553 return detail::to_array_impl(a, std::make_index_sequence<N>{});
554 }
555
556 /***********************************
557 * has_storage_type implementation *
558 ***********************************/
559
560 template <class T, class = void>
561 struct has_storage_type : std::false_type
562 {
563 };
564
565 template <class T>
567
568 template <class T>
569 struct has_storage_type<T, void_t<typename xcontainer_inner_types<T>::storage_type>>
570 : std::negation<
571 std::is_same<typename std::remove_cv<typename xcontainer_inner_types<T>::storage_type>::type, invalid_type>>
572 {
573 };
574
575 /*************************************
576 * has_data_interface implementation *
577 *************************************/
578
    // Detects whether E exposes a raw-data interface, i.e. a callable data()
    // member.
    template <class E, class = void>
    struct has_data_interface : std::false_type
    {
    };

    template <class E>
    struct has_data_interface<E, void_t<decltype(std::declval<E>().data())>> : std::true_type
    {
    };
588
589 template <class E>
591
    // Detects whether E exposes a callable strides() member.
    template <class E, class = void>
    struct has_strides : std::false_type
    {
    };

    template <class E>
    struct has_strides<E, void_t<decltype(std::declval<E>().strides())>> : std::true_type
    {
    };
601
    // Detects whether E exposes a callable begin() member.
    template <class E, class = void>
    struct has_iterator_interface : std::false_type
    {
    };

    template <class E>
    struct has_iterator_interface<E, void_t<decltype(std::declval<E>().begin())>> : std::true_type
    {
    };
611
612 template <class E>
614
615 /******************************
616 * is_iterator implementation *
617 ******************************/
618
619 template <class E, class = void>
620 struct is_iterator : std::false_type
621 {
622 };
623
624 template <class E>
626 E,
627 void_t<
628 decltype(*std::declval<const E>(), std::declval<const E>() == std::declval<const E>(), std::declval<const E>() != std::declval<const E>(), ++(*std::declval<E*>()), (*std::declval<E*>())++, std::true_type())>>
629 : std::true_type
630 {
631 };
632
633 template <typename E>
635
636 /********************************************
637 * xtrivial_default_construct implemenation *
638 ********************************************/
639
// Selects the trait used to detect trivially default-constructible types.
#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7
// has_trivial_default_constructor has not been available since libstdc++-7.
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#else
#if defined(_GLIBCXX_USE_CXX11_ABI)
#if _GLIBCXX_USE_CXX11_ABI || (defined(_GLIBCXX_USE_DUAL_ABI) && !_GLIBCXX_USE_DUAL_ABI)
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#endif
#endif
#endif

#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(XTENSOR_GLIBCXX_USE_CXX11_ABI)

    // Conforming standard library: use the standard trait.
    template <class T>
    using xtrivially_default_constructible = std::is_trivially_default_constructible<T>;

#else

    // Old libstdc++ ABI: fall back to the non-standard GNU trait.
    template <class T>
    using xtrivially_default_constructible = std::has_trivial_default_constructor<T>;

#endif
#undef XTENSOR_GLIBCXX_USE_CXX11_ABI
663
664 /*************************
665 * conditional type cast *
666 *************************/
667
668 template <bool condition, class T>
670
671 template <class T>
672 struct conditional_cast_functor<false, T> : public xtl::identity
673 {
674 };
675
676 template <class T>
678 {
679 template <class U>
680 inline auto operator()(U&& u) const
681 {
682 return static_cast<T>(std::forward<U>(u));
683 }
684 };
685
694 template <bool condition, class T, class U>
695 inline auto conditional_cast(U&& u)
696 {
697 return conditional_cast_functor<condition, T>()(std::forward<U>(u));
698 }
699
700 /**********************
701 * tracking allocator *
702 **********************/
703
704 namespace alloc_tracking
705 {
706 inline bool& enabled()
707 {
708 static bool enabled;
709 return enabled;
710 }
711
712 inline void enable()
713 {
714 enabled() = true;
715 }
716
717 inline void disable()
718 {
719 enabled() = false;
720 }
721
722 enum policy
723 {
724 print,
725 assert
726 };
727 }
728
    // Allocator adaptor that reports every allocation performed while
    // alloc_tracking::enabled() is set: either printing to stdout (print
    // policy) or throwing via XTENSOR_THROW (assert policy). Everything else
    // forwards to the wrapped allocator A.
    template <class T, class A, alloc_tracking::policy P>
    struct tracking_allocator : private A
    {
        using base_type = A;
        using value_type = typename A::value_type;
        using reference = value_type&;
        using const_reference = const value_type&;
        using pointer = typename std::allocator_traits<A>::pointer;
        using const_pointer = typename std::allocator_traits<A>::const_pointer;
        using size_type = typename std::allocator_traits<A>::size_type;
        using difference_type = typename std::allocator_traits<A>::difference_type;

        tracking_allocator() = default;

        // Reports the allocation (when tracking is enabled) and delegates to
        // the underlying allocator.
        T* allocate(std::size_t n)
        {
            if (alloc_tracking::enabled())
            {
                if (P == alloc_tracking::print)
                {
                    std::cout << "xtensor allocating: " << n << "" << std::endl;
                }
                else if (P == alloc_tracking::assert)
                {
                    XTENSOR_THROW(
                        std::runtime_error,
                        "xtensor allocation of " + std::to_string(n) + " elements detected"
                    );
                }
            }
            return base_type::allocate(n);
        }

        using base_type::deallocate;

// Construct and destroy are removed in --std=c++-20
#if ((defined(__cplusplus) && __cplusplus < 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG < 202002L))
        using base_type::construct;
        using base_type::destroy;
#endif

        // Rebinds both the tracked value type and the wrapped allocator.
        template <class U>
        struct rebind
        {
            using traits = std::allocator_traits<A>;
            using other = tracking_allocator<U, typename traits::template rebind_alloc<U>, P>;
        };
    };
777
    // Two tracking allocators are interchangeable exactly when the underlying
    // allocator types match; policy and value type do not affect storage.
    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator==(const tracking_allocator<T, AT, PT>&, const tracking_allocator<U, AU, PU>&)
    {
        return std::is_same<AT, AU>::value;
    }
783
    // Negation of the equality comparison above.
    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator!=(const tracking_allocator<T, AT, PT>& a, const tracking_allocator<U, AU, PU>& b)
    {
        return !(a == b);
    }
789
790 /*****************
791 * has_assign_to *
792 *****************/
793
    // Detects whether a const E2 provides assign_to(E1&), used to select a
    // specialized assignment path.
    template <class E1, class E2, class = void>
    struct has_assign_to : std::false_type
    {
    };

    template <class E1, class E2>
    struct has_assign_to<E1, E2, void_t<decltype(std::declval<const E2&>().assign_to(std::declval<E1&>()))>>
        : std::true_type
    {
    };

    template <class E1, class E2>
    constexpr bool has_assign_to_v = has_assign_to<E1, E2>::value;
807
808 /*************************************
809 * overlapping_memory_checker_traits *
810 *************************************/
811
    // Detects whether the elements of T have observable addresses, i.e.
    // whether std::addressof(*t.begin()) is well-formed.
    template <class T, class Enable = void>
    struct has_memory_address : std::false_type
    {
    };

    template <class T>
    struct has_memory_address<T, void_t<decltype(std::addressof(*std::declval<T>().begin()))>> : std::true_type
    {
    };
821
822 template <typename T>
824 template <typename T>
826
827 struct memory_range
828 {
829 // Checking pointer overlap is more correct in integer values,
830 // for more explanation check https://devblogs.microsoft.com/oldnewthing/20170927-00/?p=97095
831 const uintptr_t m_first = 0;
832 const uintptr_t m_last = 0;
833
834 explicit memory_range() = default;
835
836 template <class T>
837 explicit memory_range(T* first, T* last)
838 : m_first(reinterpret_cast<uintptr_t>(last < first ? last : first))
839 , m_last(reinterpret_cast<uintptr_t>(last < first ? first : last))
840 {
841 }
842
843 template <class T>
844 bool overlaps(T* first, T* last) const
845 {
846 if (first <= last)
847 {
848 return reinterpret_cast<uintptr_t>(first) <= m_last
849 && reinterpret_cast<uintptr_t>(last) >= m_first;
850 }
851 else
852 {
853 return reinterpret_cast<uintptr_t>(last) <= m_last
854 && reinterpret_cast<uintptr_t>(first) >= m_first;
855 }
856 }
857 };
858
859 template <class E, class Enable = void>
861 {
862 static bool check_overlap(const E&, const memory_range&)
863 {
864 return true;
865 }
866 };
867
    // Expressions whose elements have addresses: compare the addresses of the
    // first and last elements against the destination range.
    template <class E>
    struct overlapping_memory_checker_traits<E, std::enable_if_t<has_memory_address<E>::value>>
    {
        static bool check_overlap(const E& expr, const memory_range& dst_range)
        {
            // An empty expression occupies no memory and cannot overlap.
            if (expr.size() == 0)
            {
                return false;
            }
            else
            {
                return dst_range.overlaps(std::addressof(*expr.begin()), std::addressof(*expr.rbegin()));
            }
        }
    };
883
    // Holds the destination memory range and dispatches overlap checks to
    // overlapping_memory_checker_traits for the right-hand-side expression.
    struct overlapping_memory_checker_base
    {
        memory_range m_dst_range;

        explicit overlapping_memory_checker_base() = default;

        explicit overlapping_memory_checker_base(memory_range dst_memory_range)
            : m_dst_range(std::move(dst_memory_range))
        {
        }

        template <class E>
        bool check_overlap(const E& expr) const
        {
            // A default-constructed (empty) destination range never overlaps.
            if (!m_dst_range.m_first || !m_dst_range.m_last)
            {
                return false;
            }
            else
            {
                return overlapping_memory_checker_traits<E>::check_overlap(expr, m_dst_range);
            }
        }
    };
908
    // Default checker: the destination's memory cannot be inspected, so the
    // base is built with an empty range (check_overlap then returns false).
    template <class Dst, class Enable = void>
    struct overlapping_memory_checker : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst&)
            : overlapping_memory_checker_base()
        {
        }
    };
917
    // Destination with addressable elements: capture the address range of its
    // first/last elements (or an empty range when the destination is empty).
    template <class Dst>
    struct overlapping_memory_checker<Dst, std::enable_if_t<has_memory_address<Dst>::value>>
        : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst& aDst)
            : overlapping_memory_checker_base(
                  // Immediately-invoked lambda so the base can be initialized
                  // with either an empty or a populated range.
                  [&]()
                  {
                      if (aDst.size() == 0)
                      {
                          return memory_range();
                      }
                      else
                      {
                          return memory_range(std::addressof(*aDst.begin()), std::addressof(*aDst.rbegin()));
                      }
                  }()
              )
        {
        }
    };
939
940 template <class Dst>
941 auto make_overlapping_memory_checker(const Dst& a_dst)
942 {
944 }
945
946 /********************
947 * rebind_container *
948 ********************/
949
    // Rebinds an allocator-aware container (e.g. std::vector<T, A>) to the
    // element type X, rebinding its allocator accordingly.
    template <class X, template <class, class> class C, class T, class A>
    struct rebind_container<X, C<T, A>>
    {
        using traits = std::allocator_traits<A>;
        using allocator = typename traits::template rebind_alloc<X>;
        using type = C<X, allocator>;
    };
957
// Workaround for rebind_container problems when C++17 feature is enabled
#ifdef __cpp_template_template_args
    // Explicit std::array specialization: template-template matching against
    // the size_t non-type parameter behaves differently under C++17.
    template <class X, class T, std::size_t N>
    struct rebind_container<X, std::array<T, N>>
    {
        using type = std::array<X, N>;
    };
#else
    // Generic fixed-extent container rebind (also matches std::array).
    template <class X, template <class, std::size_t> class C, class T, std::size_t N>
    struct rebind_container<X, C<T, N>>
    {
        using type = C<X, N>;
    };
#endif
972
973 /********************
974 * get_strides_type *
975 ********************/
976
977 template <class S>
979 {
980 using type = typename rebind_container<std::ptrdiff_t, S>::type;
981 };
982
983 template <std::size_t... I>
985 {
986 // TODO we could compute the strides statically here.
987 // But we'll need full constexpr support to have a
988 // homogenous ``compute_strides`` method
989 using type = std::array<std::ptrdiff_t, sizeof...(I)>;
990 };
991
992 template <class CP, class O, class A>
993 class xbuffer_adaptor;
994
995 template <class CP, class O, class A>
997 {
998 // In bindings this mapping is called by reshape_view with an inner shape of type
999 // xbuffer_adaptor.
1000 // Since we cannot create a buffer adaptor holding data, we map it to an std::vector.
1001 using type = std::vector<
1002 typename xbuffer_adaptor<CP, O, A>::value_type,
1003 typename xbuffer_adaptor<CP, O, A>::allocator_type>;
1004 };
1005
1006
1007 template <class C>
1008 using get_strides_t = typename get_strides_type<C>::type;
1009
1010 /*******************
1011 * inner_reference *
1012 *******************/
1013
1014 template <class ST>
1016 {
1017 using storage_type = std::decay_t<ST>;
1018 using type = std::conditional_t<
1019 std::is_const<std::remove_reference_t<ST>>::value,
1020 typename storage_type::const_reference,
1021 typename storage_type::reference>;
1022 };
1023
1024 template <class ST>
1025 using inner_reference_t = typename inner_reference<ST>::type;
1026
1027 /************
1028 * get_rank *
1029 ************/
1030
1031 template <class E, typename = void>
1033 {
1034 static constexpr std::size_t value = SIZE_MAX;
1035 };
1036
1037 template <class E>
1038 struct get_rank<E, decltype((void) E::rank, void())>
1039 {
1040 static constexpr std::size_t value = E::rank;
1041 };
1042
1043 /******************
1044 * has_fixed_rank *
1045 ******************/
1046
1047 template <class E>
1049 {
1050 using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value != SIZE_MAX>;
1051 };
1052
1053 template <class E>
1054 using has_fixed_rank_t = typename has_fixed_rank<std::decay_t<E>>::type;
1055
1056 /************
1057 * has_rank *
1058 ************/
1059
1060 template <class E, size_t N>
1062 {
1063 using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value == N>;
1064 };
1065
1066 template <class E, size_t N>
1067 using has_rank_t = typename has_rank<std::decay_t<E>, N>::type;
1068
1069}
1070
1071#endif
Cross-references (documentation index):
- fixed_shape: fixed-shape implementation for compile-time defined arrays.
- auto arg(E&& e) noexcept — calculates the phase angle (in radians) elementwise for the complex numbers in e. (xcomplex.hpp:221)
- auto strides(const E& e, stride_type type = stride_type::normal) noexcept — get strides of an object. (xstrides.hpp:248)
- Standard mathematical functions for xexpressions.
- auto accumulate(F&& f, E&& e, EVS evaluation_strategy = EVS()) — accumulate and flatten array. NOTE: this function is not lazy!
- auto conditional_cast(U&& u) — perform a type cast when a condition is true. (xutils.hpp:695)