xtensor
 
Loading...
Searching...
No Matches
xutils.hpp
1/***************************************************************************
2 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht *
3 * Copyright (c) QuantStack *
4 * *
5 * Distributed under the terms of the BSD 3-Clause License. *
6 * *
7 * The full license is in the file LICENSE, distributed with this software. *
8 ****************************************************************************/
9
10#ifndef XTENSOR_UTILS_HPP
11#define XTENSOR_UTILS_HPP
12
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
23
24#include <xtl/xfunctional.hpp>
25#include <xtl/xmeta_utils.hpp>
26#include <xtl/xsequence.hpp>
27#include <xtl/xtype_traits.hpp>
28
29#include "../core/xtensor_config.hpp"
30
31#if (defined(_MSC_VER) && _MSC_VER >= 1910)
32#define NOEXCEPT(T)
33#else
34#define NOEXCEPT(T) noexcept(T)
35#endif
36
37namespace xt
38{
39 /****************
40 * declarations *
41 ****************/
42
43 template <class T>
44 struct remove_class;
45
46 /*template <class F, class... T>
47 void for_each(F&& f, std::tuple<T...>& t) noexcept(implementation_dependent);*/
48
49 /*template <class F, class R, class... T>
50 R accumulate(F&& f, R init, const std::tuple<T...>& t) noexcept(implementation_dependent);*/
51
52 template <std::size_t I, class... Args>
53 constexpr decltype(auto) argument(Args&&... args) noexcept;
54
55 template <class R, class F, class... S>
56 R apply(std::size_t index, F&& func, const std::tuple<S...>& s) NOEXCEPT(noexcept(func(std::get<0>(s))));
57
58 template <class T, class S>
59 void nested_copy(T&& iter, const S& s);
60
61 template <class T, class S>
62 void nested_copy(T&& iter, std::initializer_list<S> s);
63
64 template <class C>
65 bool resize_container(C& c, typename C::size_type size);
66
67 template <class T, std::size_t N>
68 bool resize_container(std::array<T, N>& a, typename std::array<T, N>::size_type size);
69
70 template <std::size_t... I>
71 class fixed_shape;
72
73 template <std::size_t... I>
74 bool resize_container(fixed_shape<I...>& a, std::size_t size);
75
76 template <class X, class C>
78
79 template <class X, class C>
80 using rebind_container_t = typename rebind_container<X, C>::type;
81
82 std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis);
83
84 // gcc 4.9 is affected by C++14 defect CGW 1558
85 // see http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
    // Home-grown replacement for std::void_t: the indirection through a
    // class template keeps the parameter pack SFINAE-relevant on
    // compilers affected by the defect referenced above.
    template <class... T>
    struct make_void
    {
        using type = void;
    };

    // Maps any list of (well-formed) types to void; used below for the
    // detection idiom.
    template <class... T>
    using void_t = typename make_void<T...>::type;
94
95 // This is used for non existent types (e.g. storage for some expressions
96 // like generators)
98 {
99 };
100
101 template <class... T>
103 {
104 using type = invalid_type;
105 };
106
107 template <class T, class R>
108 using disable_integral_t = std::enable_if_t<!xtl::is_integral<T>::value, R>;
109
110 /********************************
111 * meta identity implementation *
112 ********************************/
113
114 template <class T>
116 {
117 using type = T;
118 };
119
120 /***************************************
121 * is_specialization_of implementation *
122 ***************************************/
123
    // Detects whether T is a specialization of the class template TT,
    // e.g. is_specialization_of<std::vector, std::vector<int>> is true.
    // General case: T is not a specialization of TT.
    template <template <class...> class TT, class T>
    struct is_specialization_of : std::false_type
    {
    };

    // Matching case: T is TT instantiated with some type pack Ts...
    template <template <class...> class TT, class... Ts>
    struct is_specialization_of<TT, TT<Ts...>> : std::true_type
    {
    };
133
134 /*******************************
135 * remove_class implementation *
136 *******************************/
137
138 template <class T>
140 {
141 };
142
143 template <class C, class R, class... Args>
144 struct remove_class<R (C::*)(Args...)>
145 {
146 typedef R type(Args...);
147 };
148
149 template <class C, class R, class... Args>
150 struct remove_class<R (C::*)(Args...) const>
151 {
152 typedef R type(Args...);
153 };
154
155 template <class T>
156 using remove_class_t = typename remove_class<T>::type;
157
158 /***************************
159 * for_each implementation *
160 ***************************/
161
    namespace detail
    {
        // Applies f to each tuple element in index order by expanding the
        // index pack in a comma fold; noexcept iff every call is.
        template <class F, size_t... I, class... Ts>
        void for_each(F&& f, std::tuple<Ts...>& t, std::index_sequence<I...>) noexcept(
            (noexcept(f(std::get<I>(t))) && ...)
        )
        {
            (f(std::get<I>(t)), ...);
        }

        // Const-tuple overload: identical expansion over const elements.
        template <class F, size_t... I, class... Ts>
        void for_each(F&& f, const std::tuple<Ts...>& t, std::index_sequence<I...>) noexcept(
            (noexcept(f(std::get<I>(t))) && ...)
        )
        {
            (f(std::get<I>(t)), ...);
        }
    }

    // Invokes f on every element of tuple t (mutable overload).
    template <class F, class... Ts>
    inline void for_each(F&& f, std::tuple<Ts...>& t) noexcept(
        noexcept(detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{}))
    )
    {
        detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{});
    }

    // Invokes f on every element of tuple t (const overload).
    template <class F, class... Ts>
    inline void for_each(F&& f, const std::tuple<Ts...>& t) noexcept(
        noexcept(detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{}))
    )
    {
        detail::for_each(std::forward<F>(f), t, std::make_index_sequence<sizeof...(Ts)>{});
    }
196
197 /*****************************
198 * accumulate implementation *
199 *****************************/
200
202
203 namespace detail
204 {
205 template <class F, class R, class... T, size_t... I>
206 R accumulate_impl(F&& f, R init, const std::tuple<T...>& t, std::index_sequence<I...> /*I*/) noexcept(
207 (noexcept(f(init, std::get<I>(t))) && ...)
208 )
209 {
210 R res = init;
211 auto wrapper = [&](const auto& i, const auto& j)
212 {
213 res = f(i, j);
214 };
215 (wrapper(res, std::get<I>(t)), ...);
216 return res;
217 }
218 }
219
220 template <class F, class R, class... T>
221 inline R accumulate(F&& f, R init, const std::tuple<T...>& t) noexcept(
222 noexcept(detail::accumulate_impl(std::forward<F>(f), init, t, std::make_index_sequence<sizeof...(T)>{}))
223 )
224 {
225 return detail::accumulate_impl(std::forward<F>(f), init, t, std::make_index_sequence<sizeof...(T)>{});
226 }
227
229
230 /***************************
231 * argument implementation *
232 ***************************/
233
234 namespace detail
235 {
236 template <std::size_t I>
237 struct getter
238 {
239 template <class Arg, class... Args>
240 static constexpr decltype(auto) get(Arg&& /*arg*/, Args&&... args) noexcept
241 {
242 return getter<I - 1>::get(std::forward<Args>(args)...);
243 }
244 };
245
246 template <>
247 struct getter<0>
248 {
249 template <class Arg, class... Args>
250 static constexpr Arg&& get(Arg&& arg, Args&&... /*args*/) noexcept
251 {
252 return std::forward<Arg>(arg);
253 }
254 };
255 }
256
257 template <std::size_t I, class... Args>
258 constexpr decltype(auto) argument(Args&&... args) noexcept
259 {
260 static_assert(I < sizeof...(Args), "I should be lesser than sizeof...(Args)");
261 return detail::getter<I>::get(std::forward<Args>(args)...);
262 }
263
264 /************************
265 * apply implementation *
266 ************************/
267
    // Applies func to the index-th element of the tuple s and converts the
    // result to R. The dispatch happens at runtime: a self-referencing
    // generic lambda walks the expanded pack until it reaches `index`.
    // If index is out of range (guarded by XTENSOR_ASSERT in debug builds)
    // a value-initialized R{} is returned.
    template <class R, class F, class... S>
    inline R apply(std::size_t index, F&& func, const std::tuple<S...>& s)
        NOEXCEPT(noexcept(func(std::get<0>(s))))
    {
        XTENSOR_ASSERT(sizeof...(S) > index);
        return std::apply(
            [&](const S&... args) -> R
            {
                // Passing f_impl to itself emulates a recursive lambda:
                // h is the current element, t... the remaining ones.
                auto f_impl = [&](auto&& self, auto&& i, auto&& h, auto&&... t) -> R
                {
                    if (i == index)
                    {
                        return static_cast<R>(func(h));
                    }
                    if constexpr (sizeof...(t) > 0)
                    {
                        return self(self, std::size_t{i + 1}, t...);
                    }
                    return R{};
                };
                return f_impl(f_impl, std::size_t{0}, args...);
            },
            s
        );
    }
293
294 /***************************
295 * nested_initializer_list *
296 ***************************/
297
298 template <class T, std::size_t I>
300 {
301 using type = std::initializer_list<typename nested_initializer_list<T, I - 1>::type>;
302 };
303
304 template <class T>
306 {
307 using type = T;
308 };
309
310 template <class T, std::size_t I>
311 using nested_initializer_list_t = typename nested_initializer_list<T, I>::type;
312
313 /******************************
314 * nested_copy implementation *
315 ******************************/
316
317 template <class T, class S>
318 inline void nested_copy(T&& iter, const S& s)
319 {
320 *iter++ = s;
321 }
322
323 template <class T, class S>
324 inline void nested_copy(T&& iter, std::initializer_list<S> s)
325 {
326 for (auto it = s.begin(); it != s.end(); ++it)
327 {
328 nested_copy(std::forward<T>(iter), *it);
329 }
330 }
331
332 /***********************************
333 * resize_container implementation *
334 ***********************************/
335 template <class C>
336 inline bool resize_container(C& c, typename C::size_type size)
337 {
338 c.resize(size);
339 return true;
340 }
341
342 template <class T, std::size_t N>
343 inline bool resize_container(std::array<T, N>& /*a*/, typename std::array<T, N>::size_type size)
344 {
345 return size == N;
346 }
347
    // fixed_shape is a purely compile-time shape: "resizing" only
    // succeeds when the requested size equals the number of static
    // dimensions.
    template <std::size_t... I>
    inline bool resize_container(xt::fixed_shape<I...>&, std::size_t size)
    {
        return sizeof...(I) == size;
    }
353
354 /*********************************
355 * normalize_axis implementation *
356 *********************************/
357
358 // scalar normalize axis
359 inline std::size_t normalize_axis(std::size_t dim, std::ptrdiff_t axis)
360 {
361 return axis < 0 ? static_cast<std::size_t>(static_cast<std::ptrdiff_t>(dim) + axis)
362 : static_cast<std::size_t>(axis);
363 }
364
    // Normalizes a container of signed axes against expr's dimension and
    // returns a new container of std::size_t with the same container
    // shape. Enabled only for non-integral containers of signed values.
    template <class E, class C>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && xtl::is_signed<typename std::decay_t<C>::value_type>::value,
        rebind_container_t<std::size_t, std::decay_t<C>>>
    normalize_axis(E& expr, C&& axes)
    {
        rebind_container_t<std::size_t, std::decay_t<C>> res;
        resize_container(res, axes.size());

        for (std::size_t i = 0; i < axes.size(); ++i)
        {
            res[i] = normalize_axis(expr.dimension(), axes[i]);
        }

        // Debug-only sanity check: every normalized axis must be in range.
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }
390
    // Unsigned axes need no normalization: forward the container through
    // unchanged. Enabled only for non-integral containers of unsigned
    // values.
    template <class C, class E>
    inline std::enable_if_t<
        !xtl::is_integral<std::decay_t<C>>::value && std::is_unsigned<typename std::decay_t<C>::value_type>::value,
        C&&>
    normalize_axis(E& expr, C&& axes)
    {
        // expr is only needed by the debug assertion below.
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            axes.begin(),
            axes.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::forward<C>(axes);
    }
408
    // Normalizes signed axes into a fresh container of type R against
    // expr's dimension.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes)
        -> std::enable_if_t<xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value, R>
    {
        R res;
        xt::resize_container(res, std::size(axes));
        auto dim = expr.dimension();
        std::transform(
            std::begin(axes),
            std::end(axes),
            std::begin(res),
            [&dim](auto ax_el)
            {
                return normalize_axis(dim, ax_el);
            }
        );

        // Debug-only range check on the normalized axes.
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));

        return res;
    }
437
    // Unsigned axes stored in a container type different from R: copy
    // them into an R (no value normalization required for unsigned).
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && !std::is_same<R, std::decay_t<C>>::value,
        R>
    {
        // expr is only needed by the debug assertion below.
        static_cast<void>(expr);

        R res;
        xt::resize_container(res, std::size(axes));
        std::copy(std::begin(axes), std::end(axes), std::begin(res));
        XTENSOR_ASSERT(std::all_of(
            res.begin(),
            res.end(),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return res;
    }
458
    // Unsigned axes already stored in an R: avoid the copy and move the
    // container through.
    template <class R, class E, class C>
    inline auto forward_normalize(E& expr, C&& axes) -> std::enable_if_t<
        !xtl::is_signed<std::decay_t<decltype(*std::begin(axes))>>::value && std::is_same<R, std::decay_t<C>>::value,
        R&&>
    {
        // expr is only needed by the debug assertion below.
        static_cast<void>(expr);
        XTENSOR_ASSERT(std::all_of(
            std::begin(axes),
            std::end(axes),
            [&expr](auto ax_el)
            {
                return ax_el < expr.dimension();
            }
        ));
        return std::move(axes);
    }
475
476 /******************
477 * get_value_type *
478 ******************/
479
480 template <class T, class = void_t<>>
482 {
483 using type = T;
484 };
485
486 template <class T>
487 struct get_value_type<T, void_t<typename T::value_type>>
488 {
489 using type = typename T::value_type;
490 };
491
492 template <class T>
493 using get_value_type_t = typename get_value_type<T>::type;
494
495 /**********************
496 * get implementation *
497 **********************/
498
499 // When subclassing from std::tuple not all compilers are able to correctly instantiate get
500 // See here: https://stackoverflow.com/a/37188019/2528668
501 template <std::size_t I, template <typename... Args> class T, typename... Args>
502 decltype(auto) get(T<Args...>&& v)
503 {
504 return std::get<I>(static_cast<std::tuple<Args...>&&>(v));
505 }
506
507 template <std::size_t I, template <typename... Args> class T, typename... Args>
508 decltype(auto) get(T<Args...>& v)
509 {
510 return std::get<I>(static_cast<std::tuple<Args...>&>(v));
511 }
512
513 template <std::size_t I, template <typename... Args> class T, typename... Args>
514 decltype(auto) get(const T<Args...>& v)
515 {
516 return std::get<I>(static_cast<const std::tuple<Args...>&>(v));
517 }
518
519 /**************************
520 * to_array implementation *
521 ***************************/
522
523 namespace detail
524 {
525 template <class T, std::size_t N, std::size_t... I>
526 constexpr std::array<std::remove_cv_t<T>, N> to_array_impl(T (&a)[N], std::index_sequence<I...>)
527 {
528 return {{a[I]...}};
529 }
530 }
531
532 template <class T, std::size_t N>
533 constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N])
534 {
535 return detail::to_array_impl(a, std::make_index_sequence<N>{});
536 }
537
538 /***********************************
539 * has_storage_type implementation *
540 ***********************************/
541
542 template <class T, class = void>
543 struct has_storage_type : std::false_type
544 {
545 };
546
547 template <class T>
549
550 template <class T>
551 struct has_storage_type<T, void_t<typename xcontainer_inner_types<T>::storage_type>>
552 : std::negation<
553 std::is_same<typename std::remove_cv<typename xcontainer_inner_types<T>::storage_type>::type, invalid_type>>
554 {
555 };
556
557 /*************************************
558 * has_data_interface implementation *
559 *************************************/
560
    // has_data_interface<E>: true when E exposes a data() member, i.e.
    // gives direct access to a contiguous buffer.
    template <class E, class = void>
    struct has_data_interface : std::false_type
    {
    };

    template <class E>
    struct has_data_interface<E, void_t<decltype(std::declval<E>().data())>> : std::true_type
    {
    };
570
571 template <class E>
573
    // has_strides<E>: true when E exposes a strides() member.
    template <class E, class = void>
    struct has_strides : std::false_type
    {
    };

    template <class E>
    struct has_strides<E, void_t<decltype(std::declval<E>().strides())>> : std::true_type
    {
    };
583
    // has_iterator_interface<E>: true when E exposes a begin() member.
    template <class E, class = void>
    struct has_iterator_interface : std::false_type
    {
    };

    template <class E>
    struct has_iterator_interface<E, void_t<decltype(std::declval<E>().begin())>> : std::true_type
    {
    };
593
594 template <class E>
596
597 /******************************
598 * is_iterator implementation *
599 ******************************/
600
601 template <class E, class = void>
602 struct is_iterator : std::false_type
603 {
604 };
605
606 template <class E>
608 E,
609 void_t<
610 decltype(*std::declval<const E>(), std::declval<const E>() == std::declval<const E>(), std::declval<const E>() != std::declval<const E>(), ++(*std::declval<E*>()), (*std::declval<E*>())++, std::true_type())>>
611 : std::true_type
612 {
613 };
614
615 template <typename E>
617
618 /********************************************
619 * xtrivial_default_construct implemenation *
620 ********************************************/
621
#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7
// has_trivial_default_constructor has not been available since libstdc++-7.
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#else
#if defined(_GLIBCXX_USE_CXX11_ABI)
#if _GLIBCXX_USE_CXX11_ABI || (defined(_GLIBCXX_USE_DUAL_ABI) && !_GLIBCXX_USE_DUAL_ABI)
#define XTENSOR_GLIBCXX_USE_CXX11_ABI 1
#endif
#endif
#endif

// Portable alias for "trivially default constructible": very old
// pre-C++11-ABI libstdc++ only shipped the deprecated
// has_trivial_default_constructor trait.
#if !defined(__GNUG__) || defined(_LIBCPP_VERSION) || defined(XTENSOR_GLIBCXX_USE_CXX11_ABI)

    template <class T>
    using xtrivially_default_constructible = std::is_trivially_default_constructible<T>;

#else

    template <class T>
    using xtrivially_default_constructible = std::has_trivial_default_constructor<T>;

#endif
#undef XTENSOR_GLIBCXX_USE_CXX11_ABI
645
646 /*************************
647 * conditional type cast *
648 *************************/
649
650 template <bool condition, class T>
652
653 template <class T>
654 struct conditional_cast_functor<false, T> : public xtl::identity
655 {
656 };
657
658 template <class T>
660 {
661 template <class U>
662 inline auto operator()(U&& u) const
663 {
664 return static_cast<T>(std::forward<U>(u));
665 }
666 };
667
676 template <bool condition, class T, class U>
677 inline auto conditional_cast(U&& u)
678 {
679 return conditional_cast_functor<condition, T>()(std::forward<U>(u));
680 }
681
682 /**********************
683 * tracking allocator *
684 **********************/
685
    namespace alloc_tracking
    {
        // Process-wide tracking flag; a function-local static avoids a
        // global definition in this header (zero-initialized to false).
        inline bool& enabled()
        {
            static bool enabled;
            return enabled;
        }

        // Turns allocation tracking on.
        inline void enable()
        {
            enabled() = true;
        }

        // Turns allocation tracking off.
        inline void disable()
        {
            enabled() = false;
        }

        // What tracking_allocator does when an allocation is observed.
        enum policy
        {
            print,
            assert
        };
    }
710
    // Allocator adaptor that reports every allocation while tracking is
    // enabled: policy `print` logs to std::cout, policy `assert` throws.
    // All actual allocation work is delegated to the base allocator A.
    template <class T, class A, alloc_tracking::policy P>
    struct tracking_allocator : private A
    {
        using base_type = A;
        using value_type = typename A::value_type;
        using reference = value_type&;
        using const_reference = const value_type&;
        using pointer = typename std::allocator_traits<A>::pointer;
        using const_pointer = typename std::allocator_traits<A>::const_pointer;
        using size_type = typename std::allocator_traits<A>::size_type;
        using difference_type = typename std::allocator_traits<A>::difference_type;

        tracking_allocator() = default;

        // Reports the allocation (when tracking is enabled) and then
        // forwards to the underlying allocator.
        T* allocate(std::size_t n)
        {
            if (alloc_tracking::enabled())
            {
                if (P == alloc_tracking::print)
                {
                    std::cout << "xtensor allocating: " << n << "" << std::endl;
                }
                else if (P == alloc_tracking::assert)
                {
                    XTENSOR_THROW(
                        std::runtime_error,
                        "xtensor allocation of " + std::to_string(n) + " elements detected"
                    );
                }
            }
            return base_type::allocate(n);
        }

        using base_type::deallocate;

// Construct and destroy are removed in --std=c++-20
#if ((defined(__cplusplus) && __cplusplus < 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG < 202002L))
        using base_type::construct;
        using base_type::destroy;
#endif

        // Rebinds both the tracked value type and the base allocator.
        template <class U>
        struct rebind
        {
            using traits = std::allocator_traits<A>;
            using other = tracking_allocator<U, typename traits::template rebind_alloc<U>, P>;
        };
    };
759
    // Two tracking allocators compare equal when their underlying
    // allocator types are identical (the policy does not affect storage).
    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator==(const tracking_allocator<T, AT, PT>&, const tracking_allocator<U, AU, PU>&)
    {
        return std::is_same<AT, AU>::value;
    }

    template <class T, class AT, alloc_tracking::policy PT, class U, class AU, alloc_tracking::policy PU>
    inline bool operator!=(const tracking_allocator<T, AT, PT>& a, const tracking_allocator<U, AU, PU>& b)
    {
        return !(a == b);
    }
771
772 /*****************
773 * has_assign_to *
774 *****************/
775
    // has_assign_to<E1, E2>: true when a const E2 provides
    // assign_to(E1&), i.e. the expression can assign itself directly
    // into an E1.
    template <class E1, class E2, class = void>
    struct has_assign_to : std::false_type
    {
    };

    template <class E1, class E2>
    struct has_assign_to<E1, E2, void_t<decltype(std::declval<const E2&>().assign_to(std::declval<E1&>()))>>
        : std::true_type
    {
    };

    template <class E1, class E2>
    constexpr bool has_assign_to_v = has_assign_to<E1, E2>::value;
789
790 /*************************************
791 * overlapping_memory_checker_traits *
792 *************************************/
793
794 template <class T, class Enable = void>
795 struct has_memory_address : std::false_type
796 {
797 };
798
799 template <class T>
800 struct has_memory_address<T, void_t<decltype(std::addressof(*std::declval<T>().begin()))>> : std::true_type
801 {
802 };
803
804 template <typename T>
806 template <typename T>
808
809 struct memory_range
810 {
811 // Checking pointer overlap is more correct in integer values,
812 // for more explanation check https://devblogs.microsoft.com/oldnewthing/20170927-00/?p=97095
813 const uintptr_t m_first = 0;
814 const uintptr_t m_last = 0;
815
816 explicit memory_range() = default;
817
818 template <class T>
819 explicit memory_range(T* first, T* last)
820 : m_first(reinterpret_cast<uintptr_t>(last < first ? last : first))
821 , m_last(reinterpret_cast<uintptr_t>(last < first ? first : last))
822 {
823 }
824
825 template <class T>
826 bool overlaps(T* first, T* last) const
827 {
828 if (first <= last)
829 {
830 return reinterpret_cast<uintptr_t>(first) <= m_last
831 && reinterpret_cast<uintptr_t>(last) >= m_first;
832 }
833 else
834 {
835 return reinterpret_cast<uintptr_t>(last) <= m_last
836 && reinterpret_cast<uintptr_t>(first) >= m_first;
837 }
838 }
839 };
840
841 template <class E, class Enable = void>
843 {
844 static bool check_overlap(const E&, const memory_range&)
845 {
846 return true;
847 }
848 };
849
    // Expressions with addressable memory: compare the expression's
    // [begin, rbegin] address span against the destination range.
    template <class E>
    struct overlapping_memory_checker_traits<E, std::enable_if_t<has_memory_address<E>::value>>
    {
        static bool check_overlap(const E& expr, const memory_range& dst_range)
        {
            // An empty expression occupies no memory, hence no overlap.
            if (expr.size() == 0)
            {
                return false;
            }
            else
            {
                return dst_range.overlaps(std::addressof(*expr.begin()), std::addressof(*expr.rbegin()));
            }
        }
    };
865
    // Holds the destination memory range and answers overlap queries for
    // arbitrary expressions via overlapping_memory_checker_traits.
    struct overlapping_memory_checker_base
    {
        memory_range m_dst_range;

        explicit overlapping_memory_checker_base() = default;

        explicit overlapping_memory_checker_base(memory_range dst_memory_range)
            : m_dst_range(std::move(dst_memory_range))
        {
        }

        // Returns true when expr's memory may overlap the destination;
        // a default-constructed (empty) destination range never overlaps.
        template <class E>
        bool check_overlap(const E& expr) const
        {
            if (!m_dst_range.m_first || !m_dst_range.m_last)
            {
                return false;
            }
            else
            {
                return overlapping_memory_checker_traits<E>::check_overlap(expr, m_dst_range);
            }
        }
    };
890
    // Default checker: the destination has no inspectable memory, so the
    // base is constructed with an empty range (check_overlap -> false).
    template <class Dst, class Enable = void>
    struct overlapping_memory_checker : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst&)
            : overlapping_memory_checker_base()
        {
        }
    };
899
    // Addressable destination: capture its [begin, rbegin] address span
    // at construction time (empty destinations yield an empty range).
    template <class Dst>
    struct overlapping_memory_checker<Dst, std::enable_if_t<has_memory_address<Dst>::value>>
        : overlapping_memory_checker_base
    {
        explicit overlapping_memory_checker(const Dst& aDst)
            : overlapping_memory_checker_base(
                  // Immediately-invoked lambda chooses the range before
                  // delegating to the base constructor.
                  [&]()
                  {
                      if (aDst.size() == 0)
                      {
                          return memory_range();
                      }
                      else
                      {
                          return memory_range(std::addressof(*aDst.begin()), std::addressof(*aDst.rbegin()));
                      }
                  }()
              )
        {
        }
    };
921
922 template <class Dst>
923 auto make_overlapping_memory_checker(const Dst& a_dst)
924 {
926 }
927
928 /********************
929 * rebind_container *
930 ********************/
931
    // Allocator-aware containers (e.g. std::vector): rebind both the
    // value type and the allocator.
    template <class X, template <class, class> class C, class T, class A>
    struct rebind_container<X, C<T, A>>
    {
        using traits = std::allocator_traits<A>;
        using allocator = typename traits::template rebind_alloc<X>;
        using type = C<X, allocator>;
    };

// Workaround for rebind_container problems when C++17 feature is enabled
#ifdef __cpp_template_template_args
    template <class X, class T, std::size_t N>
    struct rebind_container<X, std::array<T, N>>
    {
        using type = std::array<X, N>;
    };
#else
    // Generic fixed-extent container (std::array and alike).
    template <class X, template <class, std::size_t> class C, class T, std::size_t N>
    struct rebind_container<X, C<T, N>>
    {
        using type = C<X, N>;
    };
#endif
954
955 /********************
956 * get_strides_type *
957 ********************/
958
959 template <class S>
961 {
962 using type = typename rebind_container<std::ptrdiff_t, S>::type;
963 };
964
965 template <std::size_t... I>
967 {
968 // TODO we could compute the strides statically here.
969 // But we'll need full constexpr support to have a
970 // homogenous ``compute_strides`` method
971 using type = std::array<std::ptrdiff_t, sizeof...(I)>;
972 };
973
974 template <class CP, class O, class A>
975 class xbuffer_adaptor;
976
977 template <class CP, class O, class A>
979 {
980 // In bindings this mapping is called by reshape_view with an inner shape of type
981 // xbuffer_adaptor.
982 // Since we cannot create a buffer adaptor holding data, we map it to an std::vector.
983 using type = std::vector<
984 typename xbuffer_adaptor<CP, O, A>::value_type,
985 typename xbuffer_adaptor<CP, O, A>::allocator_type>;
986 };
987
988
989 template <class C>
990 using get_strides_t = typename get_strides_type<C>::type;
991
992 /*******************
993 * inner_reference *
994 *******************/
995
996 template <class ST>
998 {
999 using storage_type = std::decay_t<ST>;
1000 using type = std::conditional_t<
1001 std::is_const<std::remove_reference_t<ST>>::value,
1002 typename storage_type::const_reference,
1003 typename storage_type::reference>;
1004 };
1005
1006 template <class ST>
1007 using inner_reference_t = typename inner_reference<ST>::type;
1008
1009 /************
1010 * get_rank *
1011 ************/
1012
1013 template <class E, typename = void>
1015 {
1016 static constexpr std::size_t value = SIZE_MAX;
1017 };
1018
1019 template <class E>
1020 struct get_rank<E, decltype((void) E::rank, void())>
1021 {
1022 static constexpr std::size_t value = E::rank;
1023 };
1024
1025 /******************
1026 * has_fixed_rank *
1027 ******************/
1028
1029 template <class E>
1031 {
1032 using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value != SIZE_MAX>;
1033 };
1034
1035 template <class E>
1036 using has_fixed_rank_t = typename has_fixed_rank<std::decay_t<E>>::type;
1037
1038 /************
1039 * has_rank *
1040 ************/
1041
1042 template <class E, size_t N>
1044 {
1045 using type = std::integral_constant<bool, get_rank<std::decay_t<E>>::value == N>;
1046 };
1047
1048 template <class E, size_t N>
1049 using has_rank_t = typename has_rank<std::decay_t<E>, N>::type;
1050
1051}
1052
1053#endif
Fixed shape implementation for compile time defined arrays.
auto arg(E &&e) noexcept
Calculates the phase angle (in radians) elementwise for the complex numbers in e.
Definition xcomplex.hpp:221
auto strides(const E &e, stride_type type=stride_type::normal) noexcept
Get strides of an object.
Definition xstrides.hpp:250
standard mathematical functions for xexpressions
auto accumulate(F &&f, E &&e, EVS evaluation_strategy=EVS())
Accumulate and flatten array NOTE This function is not lazy!
auto conditional_cast(U &&u)
Perform a type cast when a condition is true.
Definition xutils.hpp:677