xtensor
 
xutils.hpp
/***************************************************************************
 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht          *
 * Copyright (c) QuantStack                                                 *
 *                                                                          *
 * Distributed under the terms of the BSD 3-Clause License.                 *
 *                                                                          *
 * The full license is in the file LICENSE, distributed with this software. *
 ****************************************************************************/

#ifndef XTENSOR_UTILS_HPP
#define XTENSOR_UTILS_HPP

#include <algorithm>
#include <array>
#include <cmath>
#include <complex>
#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#include <xtl/xfunctional.hpp>
#include <xtl/xmeta_utils.hpp>
#include <xtl/xsequence.hpp>
#include <xtl/xtype_traits.hpp>

#include "../core/xtensor_config.hpp"

#if (defined(_MSC_VER) && _MSC_VER >= 1910)
#define NOEXCEPT(T)
#else
#define NOEXCEPT(T) noexcept(T)
#endif

namespace xt
{
    /****************
     * declarations *
     ****************/

    template <class T>
    struct remove_class;

    /*template <class F, class... T>
    void for_each(F&& f, std::tuple<T...>& t) noexcept(implementation_dependent);*/

    /*template <class F, class R, class... T>
    R accumulate(F&& f, R init, const std::tuple<T...>& t) noexcept(implementation_dependent);*/

    template <std::size_t I, class... Args>
    constexpr decltype(auto) argument(Args&&... args) noexcept;

    template <class R, class F, class... S>
    R apply(std::size_t index, F&& func, const std::tuple<S...>& s) NOEXCEPT(noexcept(func(std::get<0>(s))));

    template <class T, class S>
    void nested_copy(T&& iter, const S& s);

    template <class T, class S>
    void nested_copy(T&& iter, std::initializer_list<S> s);

    template <class C>
    bool resize_container(C& c, typename C::size_type size);

    template <class T, std::size_t N>
    bool resize_container(std::array<T, N>& a, typename std::array<T, N>::size_type size);

    template <std::size_t... I>
    class fixed_shape;

    template <std::size_t... I>
    bool resize_container(fixed_shape<I...>& a, std::size_t size);

    template <class X, class C>
    struct rebind_container;

    template <class X, class C>
    using rebind_container_t = typename rebind_container<X, C>::type;

    std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis);

    // gcc 4.9 is affected by C++14 defect CWG 1558
    // see http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
    template <class... T>
    struct make_void
    {
        using type = void;
    };

    template <class... T>
    using void_t = typename make_void<T...>::type;

    // This is used for non-existent types (e.g. storage for some expressions
    // like generators)
    struct invalid_type
    {
    };

    template <class... T>
    struct make_invalid_type
    {
        using type = invalid_type;
    };

    template <class T, class R>
    using disable_integral_t = std::enable_if_t<!xtl::is_integral<T>::value, R>;

    /********************************
     * meta identity implementation *
     ********************************/

    template <class T>
    struct meta_identity
    {
        using type = T;
    };

    /***************************************
     * is_specialization_of implementation *
     ***************************************/

    template <template <class...> class TT, class T>
    struct is_specialization_of : std::false_type
    {
    };

    template <template <class...> class TT, class... Ts>
    struct is_specialization_of<TT, TT<Ts...>> : std::true_type
    {
    };
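
    // Illustrative usage (not part of the original header): the trait detects
    // whether a type is an instance of a given class template.
    //
    //     static_assert(xt::is_specialization_of<std::vector, std::vector<int>>::value, "");
    //     static_assert(!xt::is_specialization_of<std::vector, std::tuple<int>>::value, "");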

    /*******************************
     * remove_class implementation *
     *******************************/

    template <class T>
    struct remove_class
    {
    };

    template <class C, class R, class... Args>
    struct remove_class<R (C::*)(Args...)>
    {
        typedef R type(Args...);
    };

    template <class C, class R, class... Args>
    struct remove_class<R (C::*)(Args...) const>
    {
        typedef R type(Args...);
    };

    template <class T>
    using remove_class_t = typename remove_class<T>::type;

    /***************************
     * for_each implementation *
     ***************************/

    namespace detail
    {
        template <std::size_t I, class F, class... T>
        inline typename std::enable_if<I == sizeof...(T), void>::type
        for_each_impl(F&& /*f*/, std::tuple<T...>& /*t*/) noexcept
        {
        }

        template <std::size_t I, class F, class... T>
        inline typename std::enable_if<(I < sizeof...(T)), void>::type
        for_each_impl(F&& f, std::tuple<T...>& t) noexcept(noexcept(f(std::get<I>(t))))
        {
            f(std::get<I>(t));
            for_each_impl<I + 1, F, T...>(std::forward<F>(f), t);
        }
    }

    template <class F, class... T>
    inline void for_each(F&& f, std::tuple<T...>& t) noexcept(
        noexcept(detail::for_each_impl<0, F, T...>(std::forward<F>(f), t))
    )
    {
        detail::for_each_impl<0, F, T...>(std::forward<F>(f), t);
    }

    namespace detail
    {
        template <std::size_t I, class F, class... T>
        inline typename std::enable_if<I == sizeof...(T), void>::type
        for_each_impl(F&& /*f*/, const std::tuple<T...>& /*t*/) noexcept
        {
        }

        template <std::size_t I, class F, class... T>
        inline typename std::enable_if<(I < sizeof...(T)), void>::type
        for_each_impl(F&& f, const std::tuple<T...>& t) noexcept(noexcept(f(std::get<I>(t))))
        {
            f(std::get<I>(t));
            for_each_impl<I + 1, F, T...>(std::forward<F>(f), t);
        }
    }

    template <class F, class... T>
    inline void for_each(F&& f, const std::tuple<T...>& t) noexcept(
        noexcept(detail::for_each_impl<0, F, T...>(std::forward<F>(f), t))
    )
    {
        detail::for_each_impl<0, F, T...>(std::forward<F>(f), t);
    }
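
    // Illustrative usage (not part of the original header): apply a callable to
    // every element of a tuple in order.
    //
    //     std::tuple<int, double> t(1, 2.5);
    //     xt::for_each([](auto& v) { v += 1; }, t);  // t becomes (2, 3.5)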

    /*****************************
     * accumulate implementation *
     *****************************/

    namespace detail
    {
        template <std::size_t I, class F, class R, class... T>
        inline std::enable_if_t<I == sizeof...(T), R>
        accumulate_impl(F&& /*f*/, R init, const std::tuple<T...>& /*t*/) noexcept
        {
            return init;
        }

        template <std::size_t I, class F, class R, class... T>
        inline std::enable_if_t<(I < sizeof...(T)), R>
        accumulate_impl(F&& f, R init, const std::tuple<T...>& t) noexcept(noexcept(f(init, std::get<I>(t))))
        {
            R res = f(init, std::get<I>(t));
            return accumulate_impl<I + 1, F, R, T...>(std::forward<F>(f), res, t);
        }
    }

    template <class F, class R, class... T>
    inline R accumulate(F&& f, R init, const std::tuple<T...>& t) noexcept(
        noexcept(detail::accumulate_impl<0, F, R, T...>(std::forward<F>(f), init, t))
    )
    {
        return detail::accumulate_impl<0, F, R, T...>(std::forward<F>(f), init, t);
    }
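
    // Illustrative usage (not part of the original header): left-fold over a tuple.
    //
    //     std::tuple<int, int, int> t(1, 2, 3);
    //     int sum = xt::accumulate([](int acc, int v) { return acc + v; }, 0, t);  // sum == 6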

    /***************************
     * argument implementation *
     ***************************/

    namespace detail
    {
        template <std::size_t I>
        struct getter
        {
            template <class Arg, class... Args>
            static constexpr decltype(auto) get(Arg&& /*arg*/, Args&&... args) noexcept
            {
                return getter<I - 1>::get(std::forward<Args>(args)...);
            }
        };

        template <>
        struct getter<0>
        {
            template <class Arg, class... Args>
            static constexpr Arg&& get(Arg&& arg, Args&&... /*args*/) noexcept
            {
                return std::forward<Arg>(arg);
            }
        };
    }

    template <std::size_t I, class... Args>
    constexpr decltype(auto) argument(Args&&... args) noexcept
    {
        static_assert(I < sizeof...(Args), "I should be less than sizeof...(Args)");
        return detail::getter<I>::get(std::forward<Args>(args)...);
    }
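
    // Illustrative usage (not part of the original header): select the I-th argument
    // of a parameter pack without packing it into a tuple.
    //
    //     int x = xt::argument<1>(10, 20, 30);  // x == 20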

    /************************
     * apply implementation *
     ************************/

    template <class R, class F, class... S>
    inline R apply(std::size_t index, F&& func, const std::tuple<S...>& s)
        NOEXCEPT(noexcept(func(std::get<0>(s))))
    {
        XTENSOR_ASSERT(sizeof...(S) > index);
        return std::apply(
            [&](const S&... args) -> R
            {
                auto f_impl = [&](auto&& self, auto&& i, auto&& h, auto&&... t) -> R
                {
                    if (i == index)
                    {
                        return static_cast<R>(func(h));
                    }
                    if constexpr (sizeof...(t) > 0)
                    {
                        return self(self, std::size_t{i + 1}, t...);
                    }
                    return R{};
                };
                return f_impl(f_impl, std::size_t{0}, args...);
            },
            s
        );
    }
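
    // Illustrative usage (not part of the original header): call a functor on the
    // tuple element selected by a runtime index and convert the result to R.
    //
    //     std::tuple<int, double> t(3, 4.5);
    //     double d = xt::apply<double>(1, [](auto v) { return v * 2; }, t);  // d == 9.0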

    /***************************
     * nested_initializer_list *
     ***************************/

    template <class T, std::size_t I>
    struct nested_initializer_list
    {
        using type = std::initializer_list<typename nested_initializer_list<T, I - 1>::type>;
    };

    template <class T>
    struct nested_initializer_list<T, 0>
    {
        using type = T;
    };

    template <class T, std::size_t I>
    using nested_initializer_list_t = typename nested_initializer_list<T, I>::type;

    /******************************
     * nested_copy implementation *
     ******************************/

    template <class T, class S>
    inline void nested_copy(T&& iter, const S& s)
    {
        *iter++ = s;
    }

    template <class T, class S>
    inline void nested_copy(T&& iter, std::initializer_list<S> s)
    {
        for (auto it = s.begin(); it != s.end(); ++it)
        {
            nested_copy(std::forward<T>(iter), *it);
        }
    }

    /***********************************
     * resize_container implementation *
     ***********************************/
    template <class C>
    inline bool resize_container(C& c, typename C::size_type size)
    {
        c.resize(size);
        return true;
    }

    template <class T, std::size_t N>
    inline bool resize_container(std::array<T, N>& /*a*/, typename std::array<T, N>::size_type size)
    {
        return size == N;
    }

    template <std::size_t... I>
    inline bool resize_container(xt::fixed_shape<I...>&, std::size_t size)
    {
        return sizeof...(I) == size;
    }
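
    // Illustrative usage (not part of the original header): dynamic containers are
    // resized, while fixed-size containers only report whether the requested size
    // already matches.
    //
    //     std::vector<int> v;
    //     bool ok1 = xt::resize_container(v, 3);  // true, v.size() == 3
    //     std::array<int, 3> a;
    //     bool ok2 = xt::resize_container(a, 4);  // false, arrays cannot be resized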

    /*********************************
     * normalize_axis implementation *
     *********************************/

    // scalar normalize axis
    inline std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis)
    {
        return axis < 0 ? static_cast<std::size_t>(static_cast<std::ptrdiff_t>(dim) + axis)
                        : static_cast<std::size_t>(axis);
    }
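
    // Illustrative usage (not part of the original header): negative axes count from
    // the end, as in NumPy.
    //
    //     xt::normalize_axis(3, -1);  // == 2
    //     xt::normalize_axis(3, 1);   // == 1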

    template <class E, class C>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && xtl::is_signed<typename std::decay_t<C>::value_type>::value,
        rebind_container_t<std::size_t, std::decay_t<C>>>
    normalize_axis(E& expr, C&& axes)
    {
        rebind_container_t<std::size_t, std::decay_t<C>> res;
        resize_container(res, axes.size());

        for (std::size_t i = 0; i < axes.size(); ++i)
        {
            res[i] = normalize_axis(expr.dimension(), axes[i]);
        }

        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }

    template <class C, class E>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && std::is_unsigned<typename std::decay_t<C>::value_type>::value,
        C&&>
    normalize_axis(E& expr, C&& axes)
    {
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            axes.begin(),
            axes.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::forward<C>(axes);
    }

    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes)
        -> std::enable_if_t<xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value, R>
    {
        R res;
        xt::resize_container(res, std::size(axes));
        auto dim = expr.dimension();
        std::transform(
            std::begin(axes),
            std::end(axes),
            std::begin(res),
            [&dim](auto ax_el)
            {
                return normalize_axis(dim, ax_el);
            }
        );

        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }

    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && !std::is_same<R, std::decay_t<C>>::value,
        R>
    {
        static_cast<void>(expr);

        R res;
        xt::resize_container(res, std::size(axes));
        std::copy(std::begin(axes), std::end(axes), std::begin(res));
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return res;
    }

    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && std::is_same<R, std::decay_t<C>>::value,
        R&&>
    {
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            std::begin(axes),
            std::end(axes),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::move(axes);
    }

    /******************
     * get_value_type *
     ******************/

    template <class T, class = void_t<>>
    struct get_value_type
    {
        using type = T;
    };

    template <class T>
    struct get_value_type<T, void_t<typename T::value_type>>
    {
        using type = typename T::value_type;
    };

    template <class T>
    using get_value_type_t = typename get_value_type<T>::type;

    /**********************
     * get implementation *
     **********************/

    // When subclassing from std::tuple not all compilers are able to correctly instantiate get
    // See here: https://stackoverflow.com/a/37188019/2528668
    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(T<Args...>&& v)
    {
        return std::get<I>(static_cast<std::tuple<Args...>&&>(v));
    }

    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(T<Args...>& v)
    {
        return std::get<I>(static_cast<std::tuple<Args...>&>(v));
    }

    template <std::size_t I, template <typename... Args> class T, typename... Args>
    decltype(auto) get(const T<Args...>& v)
    {
        return std::get<I>(static_cast<const std::tuple<Args...>&>(v));
    }
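
    // Illustrative usage (not part of the original header): retrieve an element from
    // a type that derives from std::tuple, even on compilers where std::get fails to
    // resolve for the derived type.
    //
    //     template <class... Args>
    //     struct derived_tuple : std::tuple<Args...>
    //     {
    //         using std::tuple<Args...>::tuple;
    //     };
    //
    //     derived_tuple<int, double> dt(1, 2.5);
    //     double v = xt::get<1>(dt);  // v == 2.5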

    /***************************
     * to_array implementation *
     ***************************/

    namespace detail
    {
        template <class T, std::size_t N, std::size_t... I>
        constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>)
        {
            return {{a[I]...}};
        }
    }

    template <class T, std::size_t N>
    constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N])
    {
        return detail::to_array_impl(a, std::make_index_sequence<N>{});
    }
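
    // Illustrative usage (not part of the original header): convert a built-in array
    // to a std::array at compile time.
    //
    //     constexpr int raw[3] = {1, 2, 3};
    //     constexpr std::array<int, 3> arr = xt::to_array(raw);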

    /***********************************
     * has_storage_type implementation *
     ***********************************/

    template <class T, class = void>
    struct has_storage_type : std::false_type
    {
    };

    template <class T>
    struct xcontainer_inner_types;

    template <class T>
    struct has_storage_type<T, void_t<typename xcontainer_inner_types<T>::storage_type>>
        : std::negation<
              std::is_same<typename std::remove_cv<typename xcontainer_inner_types<T>::storage_type>::type, invalid_type>>
    {
    };

    /*************************************
     * has_data_interface implementation *
     *************************************/

    template <class E, class = void>
    struct has_data_interface : std::false_type
    {
    };

    template <class E>
    struct has_data_interface<E, void_t<decltype(std::declval<E>().data())>> : std::true_type
    {
    };

    template <class E, class = void>
    struct has_strides : std::false_type
    {
    };

    template <class E>
    struct has_strides<E, void_t<decltype(std::declval<E>().strides())>> : std::true_type
    {
    };

    template <class E, class = void>
    struct has_iterator_interface : std::false_type
    {
    };

    template <class E>
    struct has_iterator_interface<E, void_t<decltype(std::declval<E>().begin())>> : std::true_type
    {
    };
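
    // Illustrative usage (not part of the original header): these detection traits
    // check for the presence of a member at compile time via void_t.
    //
    //     static_assert(xt::has_data_interface<std::vector<int>>::value, "");    // has .data()
    //     static_assert(!xt::has_strides<std::vector<int>>::value, "");          // no .strides()
    //     static_assert(xt::has_iterator_interface<std::vector<int>>::value, "");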

    /******************************
     * is_iterator implementation *
     ******************************/

    template <class E, class = void>
    struct is_iterator : std::false_type
    {
    };

    template <class E>
    struct is_iterator<
        E,
        void_t<decltype(
            *std::declval<const E>(),
            std::declval<const E>() == std::declval<const E>(),
            std::declval<const E>() != std::declval<const E>(),
            ++(*std::declval<E*>()),
            (*std::declval<E*>())++,
            std::true_type()
        )>>
        : std::true_type
    {
    };

    /*********************************************
     * xtrivial_default_construct implementation *
     *********************************************/

#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7
// std::has_trivial_default_constructor is no longer available since libstdc++ 7.
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#else
#if defined(_GLIBCXX_USE_CXX11_ABI)
#if _GLIBCXX_USE_CXX11_ABI || (defined(_GLIBCXX_USE_DUAL_ABI) && !_GLIBCXX_USE_DUAL_ABI)
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#endif
#endif
#endif

#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(XTENSOR_GLIBCXX_USE_CXX11_ABI)

    template <class T>
    using xtrivially_default_constructible = std::is_trivially_default_constructible<T>;

#else

    template <class T>
    using xtrivially_default_constructible = std::has_trivial_default_constructor<T>;

#endif
#undef XTENSOR_GLIBCXX_USE_CXX11_ABI

    /*************************
     * conditional type cast *
     *************************/

    template <bool condition, class T>
    struct conditional_cast_functor;

    template <class T>
    struct conditional_cast_functor<false, T> : public xtl::identity
    {
    };

    template <class T>
    struct conditional_cast_functor<true, T>
    {
        template <class U>
        inline auto operator()(U&& u) const
        {
            return static_cast<T>(std::forward<U>(u));
        }
    };

    /**
     * Perform a type cast when a condition is true.
     * If the condition is false, the argument is forwarded unchanged;
     * otherwise it is converted to T with a static_cast.
     */
    template <bool condition, class T, class U>
    inline auto conditional_cast(U&& u)
    {
        return conditional_cast_functor<condition, T>()(std::forward<U>(u));
    }
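
    // Illustrative usage (not part of the original header):
    //
    //     auto a = xt::conditional_cast<true, double>(3);   // a is a double
    //     auto b = xt::conditional_cast<false, double>(3);  // b is still an int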

    /**********************
     * tracking allocator *
     **********************/

    namespace alloc_tracking
    {
        inline bool& enabled()
        {
            static bool enabled;
            return enabled;
        }

        inline void enable()
        {
            enabled() = true;
        }

        inline void disable()
        {
            enabled() = false;
        }

        enum policy
        {
            print,
            assert
        };
    }

    template <class T, class A, alloc_tracking::policy P>
    struct tracking_allocator : private A
    {
        using base_type = A;
        using value_type = typename A::value_type;
        using reference = value_type&;
        using const_reference = const value_type&;
        using pointer = typename std::allocator_traits<A>::pointer;
        using const_pointer = typename std::allocator_traits<A>::const_pointer;
        using size_type = typename std::allocator_traits<A>::size_type;
        using difference_type = typename std::allocator_traits<A>::difference_type;

        tracking_allocator() = default;

        T* allocate(std::size_t n)
        {
            if (alloc_tracking::enabled())
            {
                if (P == alloc_tracking::print)
                {
                    std::cout << "xtensor allocating: " << n << std::endl;
                }
                else if (P == alloc_tracking::assert)
                {
                    XTENSOR_THROW(
                        std::runtime_error,
                        "xtensor allocation of " + std::to_string(n) + " elements detected"
                    );
                }
            }
            return base_type::allocate(n);
        }

        using base_type::deallocate;

// Construct and destroy are removed from std::allocator in C++20
#if ((defined(__cplusplus) && __cplusplus < 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG < 202002L))
        using base_type::construct;
        using base_type::destroy;
#endif

        template <class U>
        struct rebind
        {
            using traits = std::allocator_traits<A>;
            using other = tracking_allocator<U, typename traits::template rebind_alloc<U>, P>;
        };
    };

    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator==(const tracking_allocator<T, AT, PT>&, const tracking_allocator<U, AU, PU>&)
    {
        return std::is_same<AT, AU>::value;
    }

    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator!=(const tracking_allocator<T, AT, PT>& a, const tracking_allocator<U, AU, PU>& b)
    {
        return !(a == b);
    }
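
    // Illustrative usage (not part of the original header): wrap std::allocator to
    // report allocations while tracking is enabled.
    //
    //     using alloc = xt::tracking_allocator<double, std::allocator<double>, xt::alloc_tracking::print>;
    //     xt::alloc_tracking::enable();
    //     std::vector<double, alloc> v(10);  // prints "xtensor allocating: 10"
    //     xt::alloc_tracking::disable();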

    /*****************
     * has_assign_to *
     *****************/

    template <class E1, class E2, class = void>
    struct has_assign_to : std::false_type
    {
    };

    template <class E1, class E2>
    struct has_assign_to<E1, E2, void_t<decltype(std::declval<const E2&>().assign_to(std::declval<E1&>()))>>
        : std::true_type
    {
    };

    /*************************************
     * overlapping_memory_checker_traits *
     *************************************/

    template <class T, class Enable = void>
    struct has_memory_address : std::false_type
    {
    };

    template <class T>
    struct has_memory_address<T, void_t<decltype(std::addressof(*std::declval<T>().begin()))>> : std::true_type
    {
    };

    struct memory_range
    {
        // Checking pointer overlap is more correct in integer values,
        // for more explanation check https://devblogs.microsoft.com/oldnewthing/20170927-00/?p=97095
        const uintptr_t m_first = 0;
        const uintptr_t m_last = 0;

        explicit memory_range() = default;

        template <class T>
        explicit memory_range(T* first, T* last)
            : m_first(reinterpret_cast<uintptr_t>(last < first ? last : first))
            , m_last(reinterpret_cast<uintptr_t>(last < first ? first : last))
        {
        }

        template <class T>
        bool overlaps(T* first, T* last) const
        {
            if (first <= last)
            {
                return reinterpret_cast<uintptr_t>(first) <= m_last
                       && reinterpret_cast<uintptr_t>(last) >= m_first;
            }
            else
            {
                return reinterpret_cast<uintptr_t>(last) <= m_last
                       && reinterpret_cast<uintptr_t>(first) >= m_first;
            }
        }
    };

    template <class E, class Enable = void>
    struct overlapping_memory_checker_traits
    {
        static bool check_overlap(const E&, const memory_range&)
        {
            return true;
        }
    };

    template <class E>
    struct overlapping_memory_checker_traits<E, std::enable_if_t<has_memory_address<E>::value>>
    {
        static bool check_overlap(const E& expr, const memory_range& dst_range)
        {
            if (expr.size() == 0)
            {
                return false;
            }
            else
            {
                return dst_range.overlaps(std::addressof(*expr.begin()), std::addressof(*expr.rbegin()));
            }
        }
    };

    struct overlapping_memory_checker_base
    {
        memory_range m_dst_range;

        explicit overlapping_memory_checker_base() = default;

        explicit overlapping_memory_checker_base(memory_range dst_memory_range)
            : m_dst_range(std::move(dst_memory_range))
        {
        }

        template <class E>
        bool check_overlap(const E& expr) const
        {
            if (!m_dst_range.m_first || !m_dst_range.m_last)
            {
                return false;
            }
            else
            {
                return overlapping_memory_checker_traits<E>::check_overlap(expr, m_dst_range);
            }
        }
    };

    template <class Dst, class Enable = void>
    struct overlapping_memory_checker : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst&)
            : overlapping_memory_checker_base()
        {
        }
    };

    template <class Dst>
    struct overlapping_memory_checker<Dst, std::enable_if_t<has_memory_address<Dst>::value>>
        : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst& aDst)
            : overlapping_memory_checker_base(
                  [&]()
                  {
                      if (aDst.size() == 0)
                      {
                          return memory_range();
                      }
                      else
                      {
                          return memory_range(std::addressof(*aDst.begin()), std::addressof(*aDst.rbegin()));
                      }
                  }()
              )
        {
        }
    };

    template <class Dst>
    auto make_overlapping_memory_checker(const Dst& a_dst)
    {
        return overlapping_memory_checker<Dst>(a_dst);
    }

    /********************
     * rebind_container *
     ********************/

    template <class X, template <class, class> class C, class T, class A>
    struct rebind_container<X, C<T, A>>
    {
        using traits = std::allocator_traits<A>;
        using allocator = typename traits::template rebind_alloc<X>;
        using type = C<X, allocator>;
    };

// Workaround for rebind_container problems when the C++17 template template
// argument matching feature (__cpp_template_template_args) is enabled
#ifdef __cpp_template_template_args
    template <class X, class T, std::size_t N>
    struct rebind_container<X, std::array<T, N>>
    {
        using type = std::array<X, N>;
    };
#else
    template <class X, template <class, std::size_t> class C, class T, std::size_t N>
    struct rebind_container<X, C<T, N>>
    {
        using type = C<X, N>;
    };
#endif
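
    // Illustrative usage (not part of the original header): rebind a container to a
    // different value type, preserving its allocator or static size.
    //
    //     static_assert(std::is_same<xt::rebind_container_t<std::size_t, std::vector<int>>,
    //                                std::vector<std::size_t>>::value, "");
    //     static_assert(std::is_same<xt::rebind_container_t<double, std::array<int, 4>>,
    //                                std::array<double, 4>>::value, "");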

    /********************
     * get_strides_type *
     ********************/

    template <class S>
    struct get_strides_type
    {
        using type = typename rebind_container<std::ptrdiff_t, S>::type;
    };

    template <std::size_t... I>
    struct get_strides_type<fixed_shape<I...>>
    {
        // TODO we could compute the strides statically here.
        // But we'll need full constexpr support to have a
        // homogeneous ``compute_strides`` method
        using type = std::array<std::ptrdiff_t, sizeof...(I)>;
    };

    template <class CP, class O, class A>
    class xbuffer_adaptor;

    template <class CP, class O, class A>
    struct get_strides_type<xbuffer_adaptor<CP, O, A>>
    {
        // In bindings this mapping is called by reshape_view with an inner shape of type
        // xbuffer_adaptor.
        // Since we cannot create a buffer adaptor holding data, we map it to a std::vector.
        using type = std::vector<
            typename xbuffer_adaptor<CP, O, A>::value_type,
            typename xbuffer_adaptor<CP, O, A>::allocator_type>;
    };

    template <class C>
    using get_strides_t = typename get_strides_type<C>::type;

    /*******************
     * inner_reference *
     *******************/

    template <class ST>
    struct inner_reference
    {
        using storage_type = std::decay_t<ST>;
        using type = std::conditional_t<
            std::is_const<std::remove_reference_t<ST>>::value,
            typename storage_type::const_reference,
            typename storage_type::reference>;
    };

    template <class ST>
    using inner_reference_t = typename inner_reference<ST>::type;

    /************
     * get_rank *
     ************/

    template <class E, typename = void>
    struct get_rank
    {
        static constexpr std::size_t value = SIZE_MAX;
    };

    template <class E>
    struct get_rank<E, decltype((void) E::rank, void())>
    {
        static constexpr std::size_t value = E::rank;
    };
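
    // Illustrative usage (not part of the original header): the rank is taken from a
    // static `rank` member when present, and SIZE_MAX otherwise.
    //
    //     struct fixed_rank_expr { static constexpr std::size_t rank = 2; };
    //     static_assert(xt::get_rank<fixed_rank_expr>::value == 2, "");
    //     static_assert(xt::get_rank<int>::value == SIZE_MAX, "");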

    /******************
     * has_fixed_rank *
     ******************/

    template <class E>
    struct has_fixed_rank
    {
        using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value != SIZE_MAX>;
    };

    template <class E>
    using has_fixed_rank_t = typename has_fixed_rank<std::decay_t<E>>::type;

    /************
     * has_rank *
     ************/

    template <class E, size_t N>
    struct has_rank
    {
        using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value == N>;
    };

    template <class E, size_t N>
    using has_rank_t = typename has_rank<std::decay_t<E>, N>::type;
}

#endif