#ifndef XTENSOR_STRIDES_HPP
#define XTENSOR_STRIDES_HPP

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <numeric>
#include <type_traits>
#include <utility>
#include <vector>

#include <xtl/xsequence.hpp>

#include "xexception.hpp"
#include "xtensor_config.hpp"
#include "xtensor_forward.hpp"
namespace xt
{
    template <class shape_type>
    std::size_t compute_size(const shape_type& shape) noexcept;

    template <class offset_type, class S>
    offset_type data_offset(const S& strides) noexcept;

    template <class offset_type, class S, class Arg, class... Args>
    offset_type data_offset(const S& strides, Arg arg, Args... args) noexcept;

    template <class offset_type, layout_type L, class S, class... Args>
    offset_type unchecked_data_offset(const S& strides, Args... args) noexcept;

    template <class offset_type, class S, class It>
    offset_type element_offset(const S& strides, It first, It last) noexcept;
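    // Illustration: for a 3-d array stored contiguously in row-major order
    // with shape {2, 3, 4}, the strides are {12, 4, 1}. The flat offset of the
    // multi-index (1, 2, 3) is then
    //
    //     1 * 12 + 2 * 4 + 3 * 1 == 23
    //
    // which is what data_offset<std::ptrdiff_t>(strides, 1, 2, 3) and
    // element_offset<std::ptrdiff_t>(strides, idx.begin(), idx.end())
    // (with idx == {1, 2, 3}) evaluate to.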
    template <layout_type L = layout_type::dynamic, class shape_type, class strides_type>
    std::size_t compute_strides(const shape_type& shape, layout_type l, strides_type& strides);

    template <layout_type L = layout_type::dynamic, class shape_type, class strides_type, class backstrides_type>
    std::size_t
    compute_strides(const shape_type& shape, layout_type l, strides_type& strides, backstrides_type& backstrides);

    template <class shape_type, class strides_type>
    void adapt_strides(const shape_type& shape, strides_type& strides) noexcept;

    template <class shape_type, class strides_type, class backstrides_type>
    void adapt_strides(const shape_type& shape, strides_type& strides, backstrides_type& backstrides) noexcept;
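    // For a contiguous array the strides follow directly from the shape:
    // in row-major order stride[k] is the product of the extents after axis k,
    // in column-major order it is the product of the extents before axis k.
    // E.g. for shape {2, 3, 4}: row-major strides {12, 4, 1},
    // column-major strides {1, 2, 6}.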
    template <class S, class T>
    std::vector<get_strides_t<S>> unravel_indices(const T& idx, const S& shape, layout_type l);

    template <class S, class size_type>
    S uninitialized_shape(size_type size);

    template <class S1, class S2>
    bool broadcast_shape(const S1& input, S2& output);

    template <class S1, class S2>
    bool broadcastable(const S1& s1, const S2& s2);
    template <layout_type L>
    // ...

    template <class S, class... Args>
    // ...

    template <class S, class... Args>
    // ...
    // Advances `begin` past the last element reachable through the container's
    // shape and strides, yielding the "end" iterator of the strided data range.
    template <class C, class It, class size_type>
    It strided_data_end(const C& c, It begin, layout_type l, size_type offset)
    {
        using difference_type = typename std::iterator_traits<It>::difference_type;
        if (c.dimension() == 0)
        {
            ++begin;
        }
        else
        {
            for (std::size_t i = 0; i != c.dimension(); ++i)
            {
                begin += c.strides()[i] * difference_type(c.shape()[i] - 1);
            }
            if (l == layout_type::row_major)
            {
                begin += c.strides().back();
            }
            else if (offset != size_type(0))
            {
                begin += c.strides().front();
            }
        }
        return begin;
    }
    namespace detail
    {
        template <class return_type, class S, class T, class D>
        inline return_type compute_stride_impl(layout_type layout, const S& shape, T axis, D default_stride)
        {
            if (layout == layout_type::row_major)
            {
                return std::accumulate(
                    shape.cbegin() + axis + 1,
                    shape.cend(),
                    static_cast<return_type>(1),
                    std::multiplies<return_type>()
                );
            }
            if (layout == layout_type::column_major)
            {
                return std::accumulate(
                    shape.cbegin(),
                    shape.cbegin() + axis,
                    static_cast<return_type>(1),
                    std::multiplies<return_type>()
                );
            }
            return default_stride;
        }
    }

    /**
     * Choose stride type.
     */
    enum class stride_type
    {
        internal,  //!< As used internally (with stride(axis) == 0 if shape(axis) == 1)
        normal,    //!< Normal stride corresponding to storage
        bytes      //!< Normal stride in bytes
    };
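    // Worked example: for shape {2, 3, 4} and axis 1,
    //   row-major    -> product of the extents after axis 1  == 4
    //   column-major -> product of the extents before axis 1 == 2
    // and for any other layout the default stride is returned unchanged.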
    /**
     * Get strides of an object.
     * @param e xexpression
     * @param type the type of strides to return: internal (as used internally,
     *        with stride(axis) == 0 if shape(axis) == 1), normal (stride
     *        corresponding to storage) or bytes (normal stride in bytes).
     */
    template <class E>
    inline auto strides(const E& e, stride_type type = stride_type::normal) noexcept
    {
        using strides_type = typename E::strides_type;
        using return_type = typename strides_type::value_type;
        strides_type ret = e.strides();
        auto shape = e.shape();

        if (type == stride_type::internal)
        {
            return ret;
        }

        // Axes of extent 1 carry an internal stride of 0; recompute the stride
        // implied by the layout for those axes.
        for (std::size_t i = 0; i < ret.size(); ++i)
        {
            if (shape[i] == 1)
            {
                ret[i] = detail::compute_stride_impl<return_type>(e.layout(), shape, i, ret[i]);
            }
        }

        if (type == stride_type::bytes)
        {
            return_type f = static_cast<return_type>(sizeof(typename E::value_type));
            for (auto& s : ret)
            {
                s *= f;
            }
        }

        return ret;
    }
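    // Usage sketch: for a row-major xarray<double> a with shape {2, 3, 4},
    //   strides(a)                        -> {12, 4, 1}   (element strides)
    //   strides(a, stride_type::bytes)    -> {96, 32, 8}  (scaled by sizeof(double))
    //   strides(a, stride_type::internal) -> the strides stored in the container,
    //                                        with 0 on axes of extent 1.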
    /**
     * Get stride of an object along a given axis.
     */
    template <class E>
    inline auto strides(const E& e, std::size_t axis, stride_type type = stride_type::normal) noexcept
    {
        using strides_type = typename E::strides_type;
        using return_type = typename strides_type::value_type;

        return_type ret = e.strides()[axis];

        if (type == stride_type::internal)
        {
            return ret;
        }

        // The internal stride of an axis of extent 1 is 0; recompute the
        // stride implied by the layout.
        if (e.shape(axis) == 1)
        {
            ret = detail::compute_stride_impl<return_type>(e.layout(), e.shape(), axis, ret);
        }

        if (type == stride_type::bytes)
        {
            return_type f = static_cast<return_type>(sizeof(typename E::value_type));
            ret *= f;
        }

        return ret;
    }
    namespace detail
    {
        template <class shape_type>
        inline std::size_t compute_size_impl(const shape_type& shape, std::true_type /* is_signed */)
        {
            using size_type = std::decay_t<typename shape_type::value_type>;
            return static_cast<std::size_t>(std::abs(
                std::accumulate(shape.cbegin(), shape.cend(), size_type(1), std::multiplies<size_type>())
            ));
        }

        template <class shape_type>
        inline std::size_t compute_size_impl(const shape_type& shape, std::false_type /* is_signed */)
        {
            using size_type = std::decay_t<typename shape_type::value_type>;
            return static_cast<std::size_t>(
                std::accumulate(shape.cbegin(), shape.cend(), size_type(1), std::multiplies<size_type>())
            );
        }
    }

    template <class shape_type>
    inline std::size_t compute_size(const shape_type& shape) noexcept
    {
        return detail::compute_size_impl(
            shape,
            xtl::is_signed<std::decay_t<typename std::decay_t<shape_type>::value_type>>()
        );
    }
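    // Example: compute_size(std::vector<std::size_t>{2, 3, 4}) == 24.
    // For shapes with a signed value type, the absolute value of the product
    // of the extents is taken.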
    namespace detail
    {
        template <std::size_t dim, class S>
        inline auto raw_data_offset(const S&) noexcept
        {
            using strides_value_type = std::decay_t<decltype(std::declval<S>()[0])>;
            return strides_value_type(0);
        }

        template <std::size_t dim, class S>
        inline auto raw_data_offset(const S&, missing_type) noexcept
        {
            using strides_value_type = std::decay_t<decltype(std::declval<S>()[0])>;
            return strides_value_type(0);
        }

        template <std::size_t dim, class S, class Arg, class... Args>
        inline auto raw_data_offset(const S& strides, Arg arg, Args... args) noexcept
        {
            return static_cast<std::ptrdiff_t>(arg) * strides[dim] + raw_data_offset<dim + 1>(strides, args...);
        }
    }
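    // The recursion unrolls into a plain dot product, e.g. with strides {12, 4, 1}:
    //   raw_data_offset<0>(strides, 1, 2, 3)
    //     == 1 * 12 + raw_data_offset<1>(strides, 2, 3)
    //     == 1 * 12 + 2 * 4 + 3 * 1 == 23
    // A trailing argument of missing_type terminates the recursion early,
    // leaving the remaining axes at index 0.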
    namespace detail
    {
        template <layout_type L, std::ptrdiff_t static_dim>
        struct layout_data_offset
        {
            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                return raw_data_offset<dim>(strides, arg, args...);
            }
        };

        template <std::ptrdiff_t static_dim>
        struct layout_data_offset<layout_type::row_major, static_dim>
        {
            using self_type = layout_data_offset<layout_type::row_major, static_dim>;

            template <std::size_t dim, class S, class Arg>
            inline static auto run(const S& strides, Arg arg) noexcept
            {
                if (std::ptrdiff_t(dim) + 1 == static_dim)
                {
                    // The last axis of a row-major layout has unit stride.
                    return static_cast<std::ptrdiff_t>(arg);
                }
                else
                {
                    return static_cast<std::ptrdiff_t>(arg) * strides[dim];
                }
            }

            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                return arg * strides[dim] + self_type::template run<dim + 1>(strides, args...);
            }
        };

        template <std::ptrdiff_t static_dim>
        struct layout_data_offset<layout_type::column_major, static_dim>
        {
            using self_type = layout_data_offset<layout_type::column_major, static_dim>;

            template <std::size_t dim, class S, class Arg>
            inline static auto run(const S& strides, Arg arg) noexcept
            {
                if (dim == 0)
                {
                    // The first axis of a column-major layout has unit stride.
                    return static_cast<std::ptrdiff_t>(arg);
                }
                else
                {
                    return static_cast<std::ptrdiff_t>(arg) * strides[dim];
                }
            }

            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                if (dim == 0)
                {
                    return arg + self_type::template run<dim + 1>(strides, args...);
                }
                else
                {
                    return arg * strides[dim] + self_type::template run<dim + 1>(strides, args...);
                }
            }
        };
    }
    template <class offset_type, class S>
    inline offset_type data_offset(const S&) noexcept
    {
        return offset_type(0);
    }

    template <class offset_type, class S, class Arg, class... Args>
    inline offset_type data_offset(const S& strides, Arg arg, Args... args) noexcept
    {
        constexpr std::size_t nargs = sizeof...(Args) + 1;
        if (nargs == strides.size())
        {
            // Correct number of arguments: one index per dimension.
            return static_cast<offset_type>(detail::raw_data_offset<0>(strides, arg, args...));
        }
        else if (nargs > strides.size())
        {
            // More arguments than dimensions: drop the leading argument.
            return data_offset<offset_type>(strides, args...);
        }
        else if (detail::last_type_is_missing<Args...>)
        {
            // Fewer arguments, terminated by a missing placeholder: the given
            // indices address the leading axes, the remaining axes are 0.
            return static_cast<offset_type>(detail::raw_data_offset<0>(strides, arg, args...));
        }
        else
        {
            // Fewer arguments: match the indices against the trailing strides.
            auto view = strides.cend() - nargs;
            return static_cast<offset_type>(detail::raw_data_offset<0>(view, arg, args...));
        }
    }
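    // With strides {12, 4, 1}:
    //   data_offset<std::ptrdiff_t>(strides, 1, 2, 3) -> 23  (full index)
    //   data_offset<std::ptrdiff_t>(strides, 2)       -> 2   (matched against the last stride)
    // and an index list terminated by the missing placeholder addresses the
    // leading axes, e.g. (1, missing) -> 1 * 12 == 12.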
    template <class offset_type, layout_type L, class S, class... Args>
    inline offset_type unchecked_data_offset(const S& strides, Args... args) noexcept
    {
        return static_cast<offset_type>(
            detail::layout_data_offset<L, static_dimension<S>::value>::template run<0>(strides.cbegin(), args...)
        );
    }
    template <class offset_type, class S, class It>
    inline offset_type element_offset(const S& strides, It first, It last) noexcept
    {
        using difference_type = typename std::iterator_traits<It>::difference_type;
        auto size = static_cast<difference_type>(
            (std::min)(static_cast<typename S::size_type>(std::distance(first, last)), strides.size())
        );
        return std::inner_product(last - size, last, strides.cend() - size, offset_type(0));
    }
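    // element_offset accepts an index range of any length: only the last
    // min(distance(first, last), strides.size()) entries are paired with the
    // trailing strides. E.g. strides {12, 4, 1} and index {1, 2, 3} give
    // 1 * 12 + 2 * 4 + 3 * 1 == 23.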
    namespace detail
    {
        template <class shape_type, class strides_type, class bs_ptr>
        inline void adapt_strides(
            const shape_type& shape,
            strides_type& strides,
            bs_ptr backstrides,
            typename strides_type::size_type i) noexcept
        {
            if (shape[i] == 1)
            {
                strides[i] = 0;
            }
            (*backstrides)[i] = strides[i] * std::ptrdiff_t(shape[i] - 1);
        }

        template <class shape_type, class strides_type>
        inline void adapt_strides(
            const shape_type& shape,
            strides_type& strides,
            std::nullptr_t,
            typename strides_type::size_type i) noexcept
        {
            if (shape[i] == 1)
            {
                strides[i] = 0;
            }
        }
    }
    namespace detail
    {
        template <layout_type L, class shape_type, class strides_type, class bs_ptr>
        inline std::size_t compute_strides(const shape_type& shape, layout_type l, strides_type& strides, bs_ptr bs)
        {
            using strides_value_type = typename std::decay_t<strides_type>::value_type;
            strides_value_type data_size = 1;

#if defined(_MSC_VER) && (1931 <= _MSC_VER)
            // Workaround for recent MSVC versions.
            if (0 == shape.size())
            {
                return static_cast<std::size_t>(data_size);
            }
#endif

            if (l == layout_type::row_major)
            {
                for (std::size_t i = shape.size(); i != 0; --i)
                {
                    strides[i - 1] = data_size;
                    data_size = strides[i - 1] * static_cast<strides_value_type>(shape[i - 1]);
                    adapt_strides(shape, strides, bs, i - 1);
                }
            }
            else
            {
                for (std::size_t i = 0; i < shape.size(); ++i)
                {
                    strides[i] = data_size;
                    data_size = strides[i] * static_cast<strides_value_type>(shape[i]);
                    adapt_strides(shape, strides, bs, i);
                }
            }
            return static_cast<std::size_t>(data_size);
        }
    }
    /**
     * Compute the strides, given the shape and the layout of an array.
     */
    template <layout_type L, class shape_type, class strides_type>
    inline std::size_t compute_strides(const shape_type& shape, layout_type l, strides_type& strides)
    {
        return detail::compute_strides<L>(shape, l, strides, nullptr);
    }

    template <layout_type L, class shape_type, class strides_type, class backstrides_type>
    inline std::size_t
    compute_strides(const shape_type& shape, layout_type l, strides_type& strides, backstrides_type& backstrides)
    {
        return detail::compute_strides<L>(shape, l, strides, &backstrides);
    }
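    // Usage sketch: filling a stride vector for shape {2, 3, 4}.
    //
    //     std::vector<std::size_t> shape = {2, 3, 4};
    //     std::vector<std::ptrdiff_t> strides(shape.size());
    //     std::size_t n = compute_strides(shape, layout_type::row_major, strides);
    //     // strides == {12, 4, 1}, n == 24
    //     compute_strides(shape, layout_type::column_major, strides);
    //     // strides == {1, 2, 6}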
    template <class T1, class T2>
    inline bool stride_match_condition(const T1& stride, const T2& shape, const T1& data_size, bool zero_strides)
    {
        return (shape == T2(1) && stride == T1(0) && zero_strides) || (stride == data_size);
    }

    // Check whether the given strides are the contiguous strides implied by
    // the shape and the layout. If zero_strides is true, a stride of 0 on an
    // axis of extent 1 is also accepted.
    template <class shape_type, class strides_type>
    inline bool
    do_strides_match(const shape_type& shape, const strides_type& strides, layout_type l, bool zero_strides)
    {
        using value_type = typename strides_type::value_type;
        value_type data_size = 1;
        if (l == layout_type::row_major)
        {
            for (std::size_t i = strides.size(); i != 0; --i)
            {
                if (!stride_match_condition(strides[i - 1], shape[i - 1], data_size, zero_strides))
                {
                    return false;
                }
                data_size *= static_cast<value_type>(shape[i - 1]);
            }
            return true;
        }
        else if (l == layout_type::column_major)
        {
            for (std::size_t i = 0; i < strides.size(); ++i)
            {
                if (!stride_match_condition(strides[i], shape[i], data_size, zero_strides))
                {
                    return false;
                }
                data_size *= static_cast<value_type>(shape[i]);
            }
            return true;
        }
        else
        {
            return false;
        }
    }
    template <class shape_type, class strides_type>
    inline void adapt_strides(const shape_type& shape, strides_type& strides) noexcept
    {
        for (typename shape_type::size_type i = 0; i < shape.size(); ++i)
        {
            detail::adapt_strides(shape, strides, nullptr, i);
        }
    }

    template <class shape_type, class strides_type, class backstrides_type>
    inline void
    adapt_strides(const shape_type& shape, strides_type& strides, backstrides_type& backstrides) noexcept
    {
        for (typename shape_type::size_type i = 0; i < shape.size(); ++i)
        {
            detail::adapt_strides(shape, strides, &backstrides, i);
        }
    }
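    // Example: shape {3, 1, 4} with row-major strides {4, 4, 1}.
    // adapt_strides sets the stride of the broadcasting axis (extent 1) to 0,
    // following the internal convention, and fills the backstrides with
    // stride * (extent - 1):
    //   strides     -> {4, 0, 1}
    //   backstrides -> {8, 0, 3}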
    namespace detail
    {
        template <class S>
        inline S unravel_noexcept(typename S::value_type idx, const S& strides, layout_type l) noexcept
        {
            using value_type = typename S::value_type;
            using size_type = typename S::size_type;
            S result = xtl::make_sequence<S>(strides.size(), 0);
            if (l == layout_type::row_major)
            {
                for (size_type i = 0; i < strides.size(); ++i)
                {
                    value_type str = strides[i];
                    value_type quot = str != 0 ? idx / str : 0;
                    idx = str != 0 ? idx % str : idx;
                    result[i] = quot;
                }
            }
            else
            {
                for (size_type i = strides.size(); i != 0; --i)
                {
                    value_type str = strides[i - 1];
                    value_type quot = str != 0 ? idx / str : 0;
                    idx = str != 0 ? idx % str : idx;
                    result[i - 1] = quot;
                }
            }
            return result;
        }
    }
    template <class S>
    inline S unravel_from_strides(typename S::value_type index, const S& strides, layout_type l)
    {
        if (l != layout_type::row_major && l != layout_type::column_major)
        {
            XTENSOR_THROW(std::runtime_error, "unravel_index: dynamic layout not supported");
        }
        return detail::unravel_noexcept(index, strides, l);
    }
    template <class S, class T>
    inline get_value_type_t<T> ravel_from_strides(const T& index, const S& strides)
    {
        return element_offset<get_value_type_t<T>>(strides, index.begin(), index.end());
    }
    template <class S>
    inline get_strides_t<S> unravel_index(typename S::value_type index, const S& shape, layout_type l)
    {
        using strides_type = get_strides_t<S>;
        using strides_value_type = typename strides_type::value_type;
        strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
        compute_strides(shape, l, strides);
        return unravel_from_strides(static_cast<strides_value_type>(index), strides, l);
    }
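    // Example: unravel_index(17, shape {3, 4, 5}, layout_type::row_major)
    // returns {0, 3, 2}, since the row-major strides of that shape are
    // {20, 5, 1} and 0 * 20 + 3 * 5 + 2 * 1 == 17.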
    template <class S, class T>
    inline std::vector<get_strides_t<S>> unravel_indices(const T& idx, const S& shape, layout_type l)
    {
        using strides_type = get_strides_t<S>;
        using strides_value_type = typename strides_type::value_type;
        strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
        compute_strides(shape, l, strides);
        std::vector<get_strides_t<S>> out(idx.size());
        auto out_iter = out.begin();
        auto idx_iter = idx.begin();
        for (; out_iter != out.end(); ++out_iter, ++idx_iter)
        {
            *out_iter = unravel_from_strides(static_cast<strides_value_type>(*idx_iter), strides, l);
        }
        return out;
    }
    template <class S, class T>
    inline get_value_type_t<T> ravel_index(const T& index, const S& shape, layout_type l)
    {
        using strides_type = get_strides_t<S>;
        strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
        compute_strides(shape, l, strides);
        return ravel_from_strides(index, strides);
    }
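    // ravel_index is the inverse operation:
    // ravel_index({0, 3, 2}, shape {3, 4, 5}, layout_type::row_major) -> 17.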
    template <class S, class stype>
    inline S uninitialized_shape(stype size)
    {
        using value_type = typename S::value_type;
        using size_type = typename S::size_type;
        return xtl::make_sequence<S>(static_cast<size_type>(size), std::numeric_limits<value_type>::max());
    }
    template <class S1, class S2>
    inline bool broadcast_shape(const S1& input, S2& output)
    {
        bool trivial_broadcast = (input.size() == output.size());
        using value_type = typename S2::value_type;
        auto output_index = output.size();
        auto input_index = input.size();

        if (output_index < input_index)
        {
            throw_broadcast_error(output, input);
        }
        for (; input_index != 0; --input_index, --output_index)
        {
            // The output still holds the uninitialized_shape sentinel: take the input extent.
            if (output[output_index - 1] == std::numeric_limits<value_type>::max())
            {
                output[output_index - 1] = static_cast<value_type>(input[input_index - 1]);
            }
            // An output extent of 1 is broadcast: take the input extent.
            else if (output[output_index - 1] == 1)
            {
                output[output_index - 1] = static_cast<value_type>(input[input_index - 1]);
                trivial_broadcast = trivial_broadcast && (input[input_index - 1] == 1);
            }
            // An input extent of 1 is broadcast over the output extent.
            else if (input[input_index - 1] == 1)
            {
                trivial_broadcast = false;
            }
            // Otherwise the extents must match.
            else if (static_cast<value_type>(input[input_index - 1]) != output[output_index - 1])
            {
                throw_broadcast_error(output, input);
            }
        }
        return trivial_broadcast;
    }
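    // Typical use: start from a sentinel shape produced by uninitialized_shape
    // and fold the shapes of all operands into it.
    //
    //     auto out = uninitialized_shape<std::vector<std::size_t>>(3);  // {MAX, MAX, MAX}
    //     broadcast_shape(std::vector<std::size_t>{2, 3, 4}, out);      // out == {2, 3, 4}
    //     broadcast_shape(std::vector<std::size_t>{3, 1}, out);         // out == {2, 3, 4}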
    template <class S1, class S2>
    inline bool broadcastable(const S1& src_shape, const S2& dst_shape)
    {
        auto src_iter = src_shape.crbegin();
        auto dst_iter = dst_shape.crbegin();
        bool res = dst_shape.size() >= src_shape.size();
        for (; src_iter != src_shape.crend() && res; ++src_iter, ++dst_iter)
        {
            res = (static_cast<std::size_t>(*src_iter) == static_cast<std::size_t>(*dst_iter))
                  || (*src_iter == 1);
        }
        return res;
    }
        // ...

        template <class S1, class S2>
        static std::size_t get(const S1& s1, const S2& s2)
        {
            using value_type = typename S1::value_type;
            // ...
        }

        // ...

        template <class S1, class S2>
        static std::size_t get(const S1& s1, const S2& s2)
        {
            using size_type = typename S1::size_type;
            using value_type = typename S1::value_type;

            if (s1.size() != s2.size())
            {
                // ...
            }

            auto size = s2.size();
            size_type index = 0;
            for (; index < size; ++index)
            {
                if (static_cast<value_type>(s1[index]) != static_cast<value_type>(s2[index]))
                {
                    // ...
                }
            }
            // ...
        }
    namespace detail
    {
        template <class S, std::size_t dim>
        inline bool check_in_bounds_impl(const S&)
        {
            return true;
        }

        template <class S, std::size_t dim>
        inline bool check_in_bounds_impl(const S&, missing_type)
        {
            return true;
        }

        template <class S, std::size_t dim, class T, class... Args>
        inline bool check_in_bounds_impl(const S& shape, T& arg, Args&... args)
        {
            if (sizeof...(Args) + 1 > shape.size())
            {
                // More arguments than dimensions: skip the leading argument.
                return check_in_bounds_impl<S, dim>(shape, args...);
            }
            else
            {
                return arg >= T(0) && arg < static_cast<T>(shape[dim])
                       && check_in_bounds_impl<S, dim + 1>(shape, args...);
            }
        }
    }

    /**
     * Check if the index is within the bounds of the array.
     */
    template <class S, class... Args>
    inline bool check_in_bounds(const S& shape, Args&... args)
    {
        return detail::check_in_bounds_impl<S, 0>(shape, args...);
    }
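    // Example: for shape {2, 3},
    //   check_in_bounds(shape, i, j) with (i, j) == (1, 2) -> true
    //   with (i, j) == (1, 3) -> false (3 is out of bounds for the second axis)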
    namespace detail
    {
        template <class S, std::size_t dim>
        inline void normalize_periodic_impl(const S&)
        {
        }

        template <class S, std::size_t dim>
        inline void normalize_periodic_impl(const S&, missing_type)
        {
        }

        template <class S, std::size_t dim, class T, class... Args>
        inline void normalize_periodic_impl(const S& shape, T& arg, Args&... args)
        {
            if (sizeof...(Args) + 1 > shape.size())
            {
                // More arguments than dimensions: skip the leading argument.
                normalize_periodic_impl<S, dim>(shape, args...);
            }
            else
            {
                T n = static_cast<T>(shape[dim]);
                arg = (n + (arg % n)) % n;
                normalize_periodic_impl<S, dim + 1>(shape, args...);
            }
        }
    }

    /**
     * Normalise an index of a periodic array.
     */
    template <class S, class... Args>
    inline void normalize_periodic(const S& shape, Args&... args)
    {
        check_dimension(shape, args...);
        detail::normalize_periodic_impl<S, 0>(shape, args...);
    }
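    // Example: for shape {3, 4} and indices (i, j) == (-1, 5),
    // normalize_periodic(shape, i, j) maps them into range: i == 2, j == 1.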
}

#endif