xtensor
xstrided_view_base.hpp
/***************************************************************************
 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht          *
 * Copyright (c) QuantStack                                                 *
 *                                                                          *
 * Distributed under the terms of the BSD 3-Clause License.                 *
 *                                                                          *
 * The full license is in the file LICENSE, distributed with this software. *
 ***************************************************************************/

#ifndef XTENSOR_STRIDED_VIEW_BASE_HPP
#define XTENSOR_STRIDED_VIEW_BASE_HPP

#include <type_traits>

#include <xtl/xsequence.hpp>
#include <xtl/xvariant.hpp>

#include "xaccessible.hpp"
#include "xslice.hpp"
#include "xstrides.hpp"
#include "xtensor_config.hpp"
#include "xtensor_forward.hpp"
#include "xutils.hpp"

namespace xt
{
    namespace detail
    {
        template <class CT, layout_type L>
        class flat_expression_adaptor
        {
        public:

            using xexpression_type = std::decay_t<CT>;
            using shape_type = typename xexpression_type::shape_type;
            using inner_strides_type = get_strides_t<shape_type>;
            using index_type = inner_strides_type;
            using size_type = typename xexpression_type::size_type;
            using value_type = typename xexpression_type::value_type;
            using const_reference = typename xexpression_type::const_reference;
            using reference = std::conditional_t<
                std::is_const<std::remove_reference_t<CT>>::value,
                typename xexpression_type::const_reference,
                typename xexpression_type::reference>;

            using iterator = decltype(std::declval<std::remove_reference_t<CT>>().template begin<L>());
            using const_iterator = decltype(std::declval<std::decay_t<CT>>().template cbegin<L>());
            using reverse_iterator = decltype(std::declval<std::remove_reference_t<CT>>().template rbegin<L>());
            using const_reverse_iterator = decltype(std::declval<std::decay_t<CT>>().template crbegin<L>());

            explicit flat_expression_adaptor(CT* e);

            template <class FST>
            flat_expression_adaptor(CT* e, FST&& strides);

            void update_pointer(CT* ptr) const;

            size_type size() const;
            reference operator[](size_type idx);
            const_reference operator[](size_type idx) const;

            iterator begin();
            iterator end();
            const_iterator begin() const;
            const_iterator end() const;
            const_iterator cbegin() const;
            const_iterator cend() const;

        private:

            static index_type& get_index();

            mutable CT* m_e;
            inner_strides_type m_strides;
            size_type m_size;
        };

        template <class T>
        struct is_flat_expression_adaptor : std::false_type
        {
        };

        template <class CT, layout_type L>
        struct is_flat_expression_adaptor<flat_expression_adaptor<CT, L>> : std::true_type
        {
        };

        template <class E, class ST>
        struct provides_data_interface
            : xtl::conjunction<has_data_interface<std::decay_t<E>>, xtl::negation<is_flat_expression_adaptor<ST>>>
        {
        };
    }

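    // A minimal sketch of what flat_expression_adaptor provides (illustrative only;
    // xt::xarray and layout_type::row_major are used here purely as assumed examples,
    // the adaptor itself is an implementation detail and is never created by users):
    //
    //   xt::xarray<double> a = {{1., 2., 3.}, {4., 5., 6.}};
    //   auto fun = a * 2.0;  // an expression without data()/storage()
    //   detail::flat_expression_adaptor<decltype(fun), layout_type::row_major> flat(std::addressof(fun));
    //   // row-major strides computed for shape {2, 3} are {3, 1}; flat[4] unravels
    //   // the flat index 4 to the multi-index (1, 1) and yields fun.element(...),
    //   // i.e. 5. * 2.0 == 10.
    //
    // In other words, the adaptor exposes a flat, container-like interface (size(),
    // operator[], begin()/end()) over an expression that has no raw buffer.
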
    template <class D>
    class xstrided_view_base : private xaccessible<D>
    {
    public:

        using base_type = xaccessible<D>;
        using inner_types = xcontainer_inner_types<D>;

        using xexpression_type = typename inner_types::xexpression_type;
        using undecay_expression = typename inner_types::undecay_expression;
        static constexpr bool is_const = std::is_const<std::remove_reference_t<undecay_expression>>::value;

        using value_type = typename xexpression_type::value_type;
        using reference = typename inner_types::reference;
        using const_reference = typename inner_types::const_reference;
        using pointer = std::
            conditional_t<is_const, typename xexpression_type::const_pointer, typename xexpression_type::pointer>;
        using const_pointer = typename xexpression_type::const_pointer;
        using size_type = typename inner_types::size_type;
        using difference_type = typename xexpression_type::difference_type;

        using storage_getter = typename inner_types::storage_getter;
        using inner_storage_type = typename inner_types::inner_storage_type;
        using storage_type = std::remove_reference_t<inner_storage_type>;

        using shape_type = typename inner_types::shape_type;
        using strides_type = get_strides_t<shape_type>;
        using backstrides_type = strides_type;

        using inner_shape_type = shape_type;
        using inner_strides_type = strides_type;
        using inner_backstrides_type = backstrides_type;

        using undecay_shape = typename inner_types::undecay_shape;

        using simd_value_type = xt_simd::simd_type<value_type>;
        using bool_load_type = typename xexpression_type::bool_load_type;

        static constexpr layout_type static_layout = inner_types::layout;
        static constexpr bool contiguous_layout = static_layout != layout_type::dynamic
                                                  && xexpression_type::contiguous_layout;

        template <class CTA, class SA>
        xstrided_view_base(CTA&& e, SA&& shape, strides_type&& strides, size_type offset, layout_type layout) noexcept;

        xstrided_view_base(xstrided_view_base&& rhs);

        xstrided_view_base(const xstrided_view_base& rhs);

        const inner_shape_type& shape() const noexcept;
        const inner_strides_type& strides() const noexcept;
        const inner_backstrides_type& backstrides() const noexcept;
        layout_type layout() const noexcept;
        bool is_contiguous() const noexcept;

        reference operator()();
        const_reference operator()() const;

        template <class... Args>
        reference operator()(Args... args);

        template <class... Args>
        const_reference operator()(Args... args) const;

        template <class... Args>
        reference unchecked(Args... args);

        template <class... Args>
        const_reference unchecked(Args... args) const;

        template <class It>
        reference element(It first, It last);

        template <class It>
        const_reference element(It first, It last) const;

        storage_type& storage() noexcept;
        const storage_type& storage() const noexcept;

        template <class E = xexpression_type, class ST = storage_type>
        std::enable_if_t<detail::provides_data_interface<E, ST>::value, pointer> data() noexcept;
        template <class E = xexpression_type, class ST = storage_type>
        std::enable_if_t<detail::provides_data_interface<E, ST>::value, const_pointer> data() const noexcept;
        size_type data_offset() const noexcept;

        xexpression_type& expression() noexcept;
        const xexpression_type& expression() const noexcept;

        template <class O>
        bool broadcast_shape(O& shape, bool reuse_cache = false) const;

        template <class O>
        bool has_linear_assign(const O& strides) const noexcept;

    protected:

        using offset_type = typename strides_type::value_type;

        template <class... Args>
        offset_type compute_index(Args... args) const;

        template <class... Args>
        offset_type compute_unchecked_index(Args... args) const;

        template <class It>
        offset_type compute_element_index(It first, It last) const;

        void set_offset(size_type offset);

    private:

        undecay_expression m_e;
        inner_storage_type m_storage;
        inner_shape_type m_shape;
        inner_strides_type m_strides;
        inner_backstrides_type m_backstrides;
        size_type m_offset;
        layout_type m_layout;
    };
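
    // Hedged usage sketch: xstrided_view_base provides the common machinery behind
    // the user-facing strided views (see xstrided_view.hpp); users normally reach it
    // through xt::strided_view rather than directly. Assuming only the public API:
    //
    //   xt::xarray<double> a = {{1., 2., 3.}, {4., 5., 6.}};
    //   auto v = xt::strided_view(a, {xt::all(), xt::range(1, 3)});
    //   // v.shape() == {2, 2} and v(1, 0) == 5.
    //
    // Element access goes through operator()(Args...) above: the argument pack is
    // turned into a storage offset by compute_index(), which adds m_offset to the
    // dot product of the indices with m_strides.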

    /***************************
     * flat_expression_adaptor *
     ***************************/

    namespace detail
    {
        template <class CT>
        struct inner_storage_getter
        {
            using type = decltype(std::declval<CT>().storage());
            using reference = std::add_lvalue_reference_t<CT>;

            template <class E>
            using rebind_t = inner_storage_getter<E>;

            static decltype(auto) get_flat_storage(reference e)
            {
                return e.storage();
            }

            static auto get_offset(reference e)
            {
                return e.data_offset();
            }

            static decltype(auto) get_strides(reference e)
            {
                return e.strides();
            }
        };

        template <class CT, layout_type L>
        struct flat_adaptor_getter
        {
            using type = flat_expression_adaptor<std::remove_reference_t<CT>, L>;
            using reference = std::add_lvalue_reference_t<CT>;

            template <class E>
            using rebind_t = flat_adaptor_getter<E, L>;

            static type get_flat_storage(reference e)
            {
                // moved to addressof because ampersand on xview returns a closure pointer
                return type(std::addressof(e));
            }

            static auto get_offset(reference)
            {
                return typename std::decay_t<CT>::size_type(0);
            }

            static auto get_strides(reference e)
            {
                dynamic_shape<std::ptrdiff_t> strides;
                strides.resize(e.shape().size());
                compute_strides(e.shape(), L, strides);
                return strides;
            }
        };

        template <class CT, layout_type L>
        using flat_storage_getter = std::conditional_t<
            has_data_interface<std::decay_t<CT>>::value,
            inner_storage_getter<CT>,
            flat_adaptor_getter<CT, L>>;

        template <layout_type L, class E>
        inline auto get_offset(E& e)
        {
            return flat_storage_getter<E, L>::get_offset(e);
        }

        template <layout_type L, class E>
        inline decltype(auto) get_strides(E& e)
        {
            return flat_storage_getter<E, L>::get_strides(e);
        }
    }

    /*************************************
     * xstrided_view_base implementation *
     *************************************/

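    // Dispatch sketch (illustrative; the container type below is an assumed example):
    //
    //   static_assert(std::is_same<
    //       detail::flat_storage_getter<xt::xtensor<double, 2>&, layout_type::row_major>,
    //       detail::inner_storage_getter<xt::xtensor<double, 2>&>>::value, "");
    //
    // i.e. expressions providing a data interface keep their own storage, offset and
    // strides, while all other expressions are wrapped in a flat_expression_adaptor
    // by flat_adaptor_getter, with strides recomputed from their shape.
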
    template <class D>
    template <class CTA, class SA>
    inline xstrided_view_base<D>::xstrided_view_base(
        CTA&& e,
        SA&& shape,
        strides_type&& strides,
        size_type offset,
        layout_type layout
    ) noexcept
        : m_e(std::forward<CTA>(e))
        // m_storage(detail::get_flat_storage<undecay_expression>(m_e)),
        , m_storage(storage_getter::get_flat_storage(m_e))
        , m_shape(std::forward<SA>(shape))
        , m_strides(std::move(strides))
        , m_offset(offset)
        , m_layout(layout)
    {
        m_backstrides = xtl::make_sequence<backstrides_type>(m_shape.size(), 0);
        adapt_strides(m_shape, m_strides, m_backstrides);
    }

    namespace detail
    {
        template <class T, class S>
        auto& copy_move_storage(T& expr, const S& /*storage*/)
        {
            return expr.storage();
        }

        template <class T, class E, layout_type L>
        auto copy_move_storage(T& expr, const detail::flat_expression_adaptor<E, L>& storage)
        {
            detail::flat_expression_adaptor<E, L> new_storage = storage;  // copy storage
            new_storage.update_pointer(std::addressof(expr));
            return new_storage;
        }
    }

    template <class D>
    inline xstrided_view_base<D>::xstrided_view_base(xstrided_view_base&& rhs)
        : base_type(std::move(rhs))
        , m_e(std::forward<undecay_expression>(rhs.m_e))
        , m_storage(detail::copy_move_storage(m_e, rhs.m_storage))
        , m_shape(std::move(rhs.m_shape))
        , m_strides(std::move(rhs.m_strides))
        , m_backstrides(std::move(rhs.m_backstrides))
        , m_offset(std::move(rhs.m_offset))
        , m_layout(std::move(rhs.m_layout))
    {
    }

    template <class D>
    inline xstrided_view_base<D>::xstrided_view_base(const xstrided_view_base& rhs)
        : base_type(rhs)
        , m_e(rhs.m_e)
        , m_storage(detail::copy_move_storage(m_e, rhs.m_storage))
        , m_shape(rhs.m_shape)
        , m_strides(rhs.m_strides)
        , m_backstrides(rhs.m_backstrides)
        , m_offset(rhs.m_offset)
        , m_layout(rhs.m_layout)
    {
    }

    template <class D>
    inline auto xstrided_view_base<D>::shape() const noexcept -> const inner_shape_type&
    {
        return m_shape;
    }

    template <class D>
    inline auto xstrided_view_base<D>::strides() const noexcept -> const inner_strides_type&
    {
        return m_strides;
    }

    template <class D>
    inline auto xstrided_view_base<D>::backstrides() const noexcept -> const inner_backstrides_type&
    {
        return m_backstrides;
    }

    template <class D>
    inline layout_type xstrided_view_base<D>::layout() const noexcept
    {
        return m_layout;
    }

    template <class D>
    inline bool xstrided_view_base<D>::is_contiguous() const noexcept
    {
        return m_layout != layout_type::dynamic && m_e.is_contiguous();
    }

    template <class D>
    inline auto xstrided_view_base<D>::operator()() -> reference
    {
        return m_storage[static_cast<size_type>(m_offset)];
    }

    template <class D>
    inline auto xstrided_view_base<D>::operator()() const -> const_reference
    {
        return m_storage[static_cast<size_type>(m_offset)];
    }

    template <class D>
    template <class... Args>
    inline auto xstrided_view_base<D>::operator()(Args... args) -> reference
    {
        XTENSOR_TRY(check_index(shape(), args...));
        XTENSOR_CHECK_DIMENSION(shape(), args...);
        offset_type index = compute_index(args...);
        return m_storage[static_cast<size_type>(index)];
    }

    template <class D>
    template <class... Args>
    inline auto xstrided_view_base<D>::operator()(Args... args) const -> const_reference
    {
        XTENSOR_TRY(check_index(shape(), args...));
        XTENSOR_CHECK_DIMENSION(shape(), args...);
        offset_type index = compute_index(args...);
        return m_storage[static_cast<size_type>(index)];
    }

    template <class D>
    template <class... Args>
    inline auto xstrided_view_base<D>::unchecked(Args... args) -> reference
    {
        offset_type index = compute_unchecked_index(args...);
        return m_storage[static_cast<size_type>(index)];
    }

    template <class D>
    template <class... Args>
    inline auto xstrided_view_base<D>::unchecked(Args... args) const -> const_reference
    {
        offset_type index = compute_unchecked_index(args...);
        return m_storage[static_cast<size_type>(index)];
    }

    template <class D>
    template <class It>
    inline auto xstrided_view_base<D>::element(It first, It last) -> reference
    {
        XTENSOR_TRY(check_element_index(shape(), first, last));
        return m_storage[static_cast<size_type>(compute_element_index(first, last))];
    }

    template <class D>
    template <class It>
    inline auto xstrided_view_base<D>::element(It first, It last) const -> const_reference
    {
        XTENSOR_TRY(check_element_index(shape(), first, last));
        return m_storage[static_cast<size_type>(compute_element_index(first, last))];
    }

    template <class D>
    inline auto xstrided_view_base<D>::storage() noexcept -> storage_type&
    {
        return m_storage;
    }

    template <class D>
    inline auto xstrided_view_base<D>::storage() const noexcept -> const storage_type&
    {
        return m_storage;
    }

    template <class D>
    template <class E, class ST>
    inline auto xstrided_view_base<D>::data() noexcept
        -> std::enable_if_t<detail::provides_data_interface<E, ST>::value, pointer>
    {
        return m_e.data();
    }

    template <class D>
    template <class E, class ST>
    inline auto xstrided_view_base<D>::data() const noexcept
        -> std::enable_if_t<detail::provides_data_interface<E, ST>::value, const_pointer>
    {
        return m_e.data();
    }

    template <class D>
    inline auto xstrided_view_base<D>::data_offset() const noexcept -> size_type
    {
        return m_offset;
    }

    template <class D>
    inline auto xstrided_view_base<D>::expression() noexcept -> xexpression_type&
    {
        return m_e;
    }

    template <class D>
    inline auto xstrided_view_base<D>::expression() const noexcept -> const xexpression_type&
    {
        return m_e;
    }

    template <class D>
    template <class O>
    inline bool xstrided_view_base<D>::broadcast_shape(O& shape, bool) const
    {
        return xt::broadcast_shape(m_shape, shape);
    }

    template <class D>
    template <class O>
    inline bool xstrided_view_base<D>::has_linear_assign(const O& str) const noexcept
    {
        return has_data_interface<xexpression_type>::value && str.size() == strides().size()
               && std::equal(str.cbegin(), str.cend(), strides().begin());
    }

    template <class D>
    template <class... Args>
    inline auto xstrided_view_base<D>::compute_index(Args... args) const -> offset_type
    {
        return static_cast<offset_type>(m_offset)
               + xt::data_offset<offset_type>(strides(), static_cast<offset_type>(args)...);
    }

    template <class D>
    template <class... Args>
    inline auto xstrided_view_base<D>::compute_unchecked_index(Args... args) const -> offset_type
    {
        return static_cast<offset_type>(m_offset)
               + xt::unchecked_data_offset<offset_type>(strides(), static_cast<offset_type>(args)...);
    }

    template <class D>
    template <class It>
    inline auto xstrided_view_base<D>::compute_element_index(It first, It last) const -> offset_type
    {
        return static_cast<offset_type>(m_offset) + xt::element_offset<offset_type>(strides(), first, last);
    }

    template <class D>
    void xstrided_view_base<D>::set_offset(size_type offset)
    {
        m_offset = offset;
    }

    /******************************************
     * flat_expression_adaptor implementation *
     ******************************************/

    namespace detail
    {
        template <class CT, layout_type L>
        inline flat_expression_adaptor<CT, L>::flat_expression_adaptor(CT* e)
            : m_e(e)
        {
            resize_container(get_index(), m_e->dimension());
            resize_container(m_strides, m_e->dimension());
            m_size = compute_size(m_e->shape());
            compute_strides(m_e->shape(), L, m_strides);
        }

        template <class CT, layout_type L>
        template <class FST>
        inline flat_expression_adaptor<CT, L>::flat_expression_adaptor(CT* e, FST&& strides)
            : m_e(e)
            , m_strides(xtl::forward_sequence<inner_strides_type, FST>(strides))
        {
            resize_container(get_index(), m_e->dimension());
            m_size = m_e->size();
        }

        template <class CT, layout_type L>
        inline void flat_expression_adaptor<CT, L>::update_pointer(CT* ptr) const
        {
            m_e = ptr;
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::size() const -> size_type
        {
            return m_size;
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::operator[](size_type idx) -> reference
        {
            auto i = static_cast<typename index_type::value_type>(idx);
            get_index() = detail::unravel_noexcept(i, m_strides, L);
            return m_e->element(get_index().cbegin(), get_index().cend());
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::operator[](size_type idx) const -> const_reference
        {
            auto i = static_cast<typename index_type::value_type>(idx);
            get_index() = detail::unravel_noexcept(i, m_strides, L);
            return m_e->element(get_index().cbegin(), get_index().cend());
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::begin() -> iterator
        {
            return m_e->template begin<L>();
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::end() -> iterator
        {
            return m_e->template end<L>();
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::begin() const -> const_iterator
        {
            return m_e->template cbegin<L>();
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::end() const -> const_iterator
        {
            return m_e->template cend<L>();
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::cbegin() const -> const_iterator
        {
            return m_e->template cbegin<L>();
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::cend() const -> const_iterator
        {
            return m_e->template cend<L>();
        }

        template <class CT, layout_type L>
        inline auto flat_expression_adaptor<CT, L>::get_index() -> index_type&
        {
            thread_local static index_type index;
            return index;
        }
    }

    /**********************************
     * Builder helpers implementation *
     **********************************/

    namespace detail
    {
        template <class S>
        struct slice_getter_impl
        {
            const S& m_shape;
            mutable std::size_t idx;
            using array_type = std::array<std::ptrdiff_t, 3>;

            explicit slice_getter_impl(const S& shape)
                : m_shape(shape)
                , idx(0)
            {
            }

            template <class T>
            array_type operator()(const T& /*t*/) const
            {
                return array_type{{0, 0, 0}};
            }

            template <class A, class B, class C>
            array_type operator()(const xrange_adaptor<A, B, C>& range) const
            {
                auto sl = range.get(static_cast<std::size_t>(m_shape[idx]));
                return array_type({sl(0), sl.size(), sl.step_size()});
            }

            template <class T>
            array_type operator()(const xrange<T>& range) const
            {
                return array_type({range(T(0)), range.size(), T(1)});
            }

            template <class T>
            array_type operator()(const xstepped_range<T>& range) const
            {
                return array_type({range(T(0)), range.size(), range.step_size(T(0))});
            }
        };

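        // Illustration (assumed values, not part of the header): visiting a slice with
        // slice_getter_impl yields a {start, size, step} triple resolved against the
        // current axis length, e.g. for an axis of length 10:
        //
        //   slice_getter.idx = 0;                            // axis 0, m_shape[0] == 10
        //   auto info = slice_getter(xt::range(1, 7, 2));
        //   // info == {1, 3, 2}: start 1, three elements (1, 3, 5), step 2
        //
        // fill_args below then scales the start and the step by the old stride of that axis.
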
        template <class adj_strides_policy>
        struct strided_view_args : adj_strides_policy
        {
            using base_type = adj_strides_policy;

            template <class S, class ST, class V>
            void
            fill_args(const S& shape, ST&& old_strides, std::size_t base_offset, layout_type layout, const V& slices)
            {
                // Compute dimension
                std::size_t dimension = shape.size(), n_newaxis = 0, n_add_all = 0;
                std::ptrdiff_t dimension_check = static_cast<std::ptrdiff_t>(shape.size());

                bool has_ellipsis = false;
                for (const auto& el : slices)
                {
                    if (xtl::get_if<xt::xnewaxis_tag>(&el) != nullptr)
                    {
                        ++dimension;
                        ++n_newaxis;
                    }
                    else if (xtl::get_if<std::ptrdiff_t>(&el) != nullptr)
                    {
                        --dimension;
                        --dimension_check;
                    }
                    else if (xtl::get_if<xt::xellipsis_tag>(&el) != nullptr)
                    {
                        if (has_ellipsis == true)
                        {
                            XTENSOR_THROW(std::runtime_error, "Ellipsis can only appear once.");
                        }
                        has_ellipsis = true;
                    }
                    else
                    {
                        --dimension_check;
                    }
                }

                if (dimension_check < 0)
                {
                    XTENSOR_THROW(std::runtime_error, "Too many slices for view.");
                }

                if (has_ellipsis)
                {
                    // replace ellipsis with N * xt::all
                    // remove -1 because of the ellipsis slice itself
                    n_add_all = shape.size() - (slices.size() - 1 - n_newaxis);
                }

                // Compute strided view
                new_offset = base_offset;
                new_shape.resize(dimension);
                new_strides.resize(dimension);
                base_type::resize(dimension);

                auto old_shape = shape;
                using old_strides_value_type = typename std::decay_t<ST>::value_type;

                std::ptrdiff_t axis_skip = 0;
                std::size_t idx = 0, i = 0, i_ax = 0;

                auto slice_getter = detail::slice_getter_impl<S>(shape);

                for (; i < slices.size(); ++i)
                {
                    i_ax = static_cast<std::size_t>(static_cast<std::ptrdiff_t>(i) - axis_skip);
                    auto ptr = xtl::get_if<std::ptrdiff_t>(&slices[i]);
                    if (ptr != nullptr)
                    {
                        auto slice0 = static_cast<old_strides_value_type>(*ptr);
                        new_offset += static_cast<std::size_t>(slice0 * old_strides[i_ax]);
                    }
                    else if (xtl::get_if<xt::xnewaxis_tag>(&slices[i]) != nullptr)
                    {
                        new_shape[idx] = 1;
                        base_type::set_fake_slice(idx);
                        ++axis_skip, ++idx;
                    }
                    else if (xtl::get_if<xt::xellipsis_tag>(&slices[i]) != nullptr)
                    {
                        for (std::size_t j = 0; j < n_add_all; ++j)
                        {
                            new_shape[idx] = old_shape[i_ax];
                            new_strides[idx] = old_strides[i_ax];
                            base_type::set_fake_slice(idx);
                            ++idx, ++i_ax;
                        }
                        axis_skip = axis_skip - static_cast<std::ptrdiff_t>(n_add_all) + 1;
                    }
                    else if (xtl::get_if<xt::xall_tag>(&slices[i]) != nullptr)
                    {
                        new_shape[idx] = old_shape[i_ax];
                        new_strides[idx] = old_strides[i_ax];
                        base_type::set_fake_slice(idx);
                        ++idx;
                    }
                    else if (base_type::fill_args(slices, i, idx, old_shape[i_ax], old_strides[i_ax], new_shape, new_strides))
                    {
                        ++idx;
                    }
                    else
                    {
                        slice_getter.idx = i_ax;
                        auto info = xtl::visit(slice_getter, slices[i]);
                        new_offset += static_cast<std::size_t>(info[0] * old_strides[i_ax]);
                        new_shape[idx] = static_cast<std::size_t>(info[1]);
                        new_strides[idx] = info[2] * old_strides[i_ax];
                        base_type::set_fake_slice(idx);
                        ++idx;
                    }
                }

                i_ax = static_cast<std::size_t>(static_cast<std::ptrdiff_t>(i) - axis_skip);
                for (; i_ax < old_shape.size(); ++i_ax, ++idx)
                {
                    new_shape[idx] = old_shape[i_ax];
                    new_strides[idx] = old_strides[i_ax];
                    base_type::set_fake_slice(idx);
                }

                new_layout = do_strides_match(new_shape, new_strides, layout, true) ? layout
                                                                                    : layout_type::dynamic;
            }

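            // Worked example (assumed shapes and strides, illustrative only): for an
            // expression with shape {4, 5, 6} and row-major strides {30, 6, 1}, the
            // slice vector {1, xt::all(), xt::newaxis(), xt::range(0, 3)} gives
            //   dimension  = 3 - 1 (integral index) + 1 (newaxis) = 3
            //   new_offset = base_offset + 1 * 30
            //   new_shape  = {5, 1, 3},  new_strides = {6, 0, 1}
            // and new_layout falls back to layout_type::dynamic unless the resulting
            // strides still match the requested layout.
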
            using shape_type = dynamic_shape<std::size_t>;
            shape_type new_shape;
            using strides_type = dynamic_shape<std::ptrdiff_t>;
            strides_type new_strides;
            std::size_t new_offset;
            layout_type new_layout;
        };
    }
}

#endif