xtensor
Loading...
Searching...
No Matches
xstrides.hpp
1/***************************************************************************
2 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht *
3 * Copyright (c) QuantStack *
4 * *
5 * Distributed under the terms of the BSD 3-Clause License. *
6 * *
7 * The full license is in the file LICENSE, distributed with this software. *
8 ****************************************************************************/
9
10#ifndef XTENSOR_STRIDES_HPP
11#define XTENSOR_STRIDES_HPP
12
13#include <cstddef>
14#include <functional>
15#include <limits>
16#include <numeric>
17
18#include <xtl/xsequence.hpp>
19
20#include "xexception.hpp"
21#include "xshape.hpp"
22#include "xtensor_config.hpp"
23#include "xtensor_forward.hpp"
24
25namespace xt
26{
27
28 template <class shape_type>
29 std::size_t compute_size(const shape_type& shape) noexcept;
30
35 /***************
36 * data offset *
37 ***************/
38
39 template <class offset_type, class S>
40 offset_type data_offset(const S& strides) noexcept;
41
65 template <class offset_type, class S, class Arg, class... Args>
66 offset_type data_offset(const S& strides, Arg arg, Args... args) noexcept;
67
68 template <class offset_type, layout_type L = layout_type::dynamic, class S, class... Args>
69 offset_type unchecked_data_offset(const S& strides, Args... args) noexcept;
70
71 template <class offset_type, class S, class It>
72 offset_type element_offset(const S& strides, It first, It last) noexcept;
73
74 /*******************
75 * strides builder *
76 *******************/
77
87 template <layout_type L = layout_type::dynamic, class shape_type, class strides_type>
88 std::size_t compute_strides(const shape_type& shape, layout_type l, strides_type& strides);
89
90 template <layout_type L = layout_type::dynamic, class shape_type, class strides_type, class backstrides_type>
91 std::size_t
92 compute_strides(const shape_type& shape, layout_type l, strides_type& strides, backstrides_type& backstrides);
93
94 template <class shape_type, class strides_type>
95 void adapt_strides(const shape_type& shape, strides_type& strides) noexcept;
96
97 template <class shape_type, class strides_type, class backstrides_type>
98 void adapt_strides(const shape_type& shape, strides_type& strides, backstrides_type& backstrides) noexcept;
99
100 /*****************
101 * unravel_index *
102 *****************/
103
104 template <class S>
105 S unravel_from_strides(typename S::value_type index, const S& strides, layout_type l = layout_type::row_major);
106
107 template <class S>
108 get_strides_t<S>
109 unravel_index(typename S::value_type index, const S& shape, layout_type l = layout_type::row_major);
110
111 template <class S, class T>
112 std::vector<get_strides_t<S>>
113 unravel_indices(const T& indices, const S& shape, layout_type l = layout_type::row_major);
114
115 /***********************
116 * broadcast functions *
117 ***********************/
118
119 template <class S, class size_type>
120 S uninitialized_shape(size_type size);
121
122 template <class S1, class S2>
123 bool broadcast_shape(const S1& input, S2& output);
124
125 template <class S1, class S2>
126 bool broadcastable(const S1& s1, S2& s2);
127
128 /*************************
129 * check strides overlap *
130 *************************/
131
132 template <layout_type L>
134
135 /**********************************
136 * check bounds, without throwing *
137 **********************************/
138
147 template <class S, class... Args>
148 bool in_bounds(const S& shape, Args&... args);
149
150 /********************************
151 * apply periodicity to indices *
152 *******************************/
153
162 template <class S, class... Args>
163 void normalize_periodic(const S& shape, Args&... args);
164
165 /********************************************
166 * utility functions for strided containers *
167 ********************************************/
168
169 template <class C, class It, class size_type>
170 It strided_data_end(const C& c, It begin, layout_type l, size_type offset)
171 {
172 using difference_type = typename std::iterator_traits<It>::difference_type;
173 if (c.dimension() == 0)
174 {
175 ++begin;
176 }
177 else
178 {
179 for (std::size_t i = 0; i != c.dimension(); ++i)
180 {
181 begin += c.strides()[i] * difference_type(c.shape()[i] - 1);
182 }
183 if (l == layout_type::row_major)
184 {
185 begin += c.strides().back();
186 }
187 else
188 {
189 if (offset == 0)
190 {
191 begin += c.strides().front();
192 }
193 }
194 }
195 return begin;
196 }
197
198 /***********
199 * strides *
200 ***********/
201
202 namespace detail
203 {
204 template <class return_type, class S, class T, class D>
205 inline return_type compute_stride_impl(layout_type layout, const S& shape, T axis, D default_stride)
206 {
207 if (layout == layout_type::row_major)
208 {
209 return std::accumulate(
210 shape.cbegin() + axis + 1,
211 shape.cend(),
212 static_cast<return_type>(1),
213 std::multiplies<return_type>()
214 );
215 }
216 if (layout == layout_type::column_major)
217 {
218 return std::accumulate(
219 shape.cbegin(),
220 shape.cbegin() + axis,
221 static_cast<return_type>(1),
222 std::multiplies<return_type>()
223 );
224 }
225 return default_stride;
226 }
227 }
228
    /**
     * Choose stride type.
     */
    enum class stride_type
    {
        internal = 0,  ///< As used internally (with stride(axis) == 0 if shape(axis) == 1).
        normal = 1,    ///< Normal stride corresponding to storage.
        bytes = 2,     ///< Normal stride in bytes.
    };
239
247 template <class E>
248 inline auto strides(const E& e, stride_type type = stride_type::normal) noexcept
249 {
250 using strides_type = typename E::strides_type;
251 using return_type = typename strides_type::value_type;
252 strides_type ret = e.strides();
253 auto shape = e.shape();
254
255 if (type == stride_type::internal)
256 {
257 return ret;
258 }
259
260 for (std::size_t i = 0; i < ret.size(); ++i)
261 {
262 if (shape[i] == 1)
263 {
264 ret[i] = detail::compute_stride_impl<return_type>(e.layout(), shape, i, ret[i]);
265 }
266 }
267
268 if (type == stride_type::bytes)
269 {
270 return_type f = static_cast<return_type>(sizeof(typename E::value_type));
271 std::for_each(
272 ret.begin(),
273 ret.end(),
274 [f](auto& c)
275 {
276 c *= f;
277 }
278 );
279 }
280
281 return ret;
282 }
283
291 template <class E>
292 inline auto strides(const E& e, std::size_t axis, stride_type type = stride_type::normal) noexcept
293 {
294 using strides_type = typename E::strides_type;
295 using return_type = typename strides_type::value_type;
296
297 return_type ret = e.strides()[axis];
298
299 if (type == stride_type::internal)
300 {
301 return ret;
302 }
303
304 if (ret == 0)
305 {
306 if (e.shape(axis) == 1)
307 {
308 ret = detail::compute_stride_impl<return_type>(e.layout(), e.shape(), axis, ret);
309 }
310 }
311
312 if (type == stride_type::bytes)
313 {
314 return_type f = static_cast<return_type>(sizeof(typename E::value_type));
315 ret *= f;
316 }
317
318 return ret;
319 }
320
321 /******************
322 * Implementation *
323 ******************/
324
325 namespace detail
326 {
327 template <class shape_type>
328 inline std::size_t compute_size_impl(const shape_type& shape, std::true_type /* is signed */)
329 {
330 using size_type = std::decay_t<typename shape_type::value_type>;
331 return static_cast<std::size_t>(std::abs(
332 std::accumulate(shape.cbegin(), shape.cend(), size_type(1), std::multiplies<size_type>())
333 ));
334 }
335
336 template <class shape_type>
337 inline std::size_t compute_size_impl(const shape_type& shape, std::false_type /* is not signed */)
338 {
339 using size_type = std::decay_t<typename shape_type::value_type>;
340 return static_cast<std::size_t>(
341 std::accumulate(shape.cbegin(), shape.cend(), size_type(1), std::multiplies<size_type>())
342 );
343 }
344 }
345
346 template <class shape_type>
347 inline std::size_t compute_size(const shape_type& shape) noexcept
348 {
349 return detail::compute_size_impl(
350 shape,
351 xtl::is_signed<std::decay_t<typename std::decay_t<shape_type>::value_type>>()
352 );
353 }
354
    namespace detail
    {

        // Recursion terminator: no indices left, contributes a zero offset.
        template <std::size_t dim, class S>
        inline auto raw_data_offset(const S&) noexcept
        {
            using strides_value_type = std::decay_t<decltype(std::declval<S>()[0])>;
            return strides_value_type(0);
        }

        // Terminator for a trailing xt::missing placeholder: counts as index 0.
        template <std::size_t dim, class S>
        inline auto raw_data_offset(const S&, missing_type) noexcept
        {
            using strides_value_type = std::decay_t<decltype(std::declval<S>()[0])>;
            return strides_value_type(0);
        }

        // Accumulates arg * strides[dim] over the remaining indices.
        template <std::size_t dim, class S, class Arg, class... Args>
        inline auto raw_data_offset(const S& strides, Arg arg, Args... args) noexcept
        {
            return static_cast<std::ptrdiff_t>(arg) * strides[dim] + raw_data_offset<dim + 1>(strides, args...);
        }

        // Generic fallback: no layout-specific shortcut, full scalar product.
        // S here is an iterator over the strides (callers pass cbegin()).
        template <layout_type L, std::ptrdiff_t static_dim>
        struct layout_data_offset
        {
            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                return raw_data_offset<dim>(strides, arg, args...);
            }
        };

        // Row-major shortcut: the last axis has stride 1 when the static
        // dimension is known, so the final index is added unscaled.
        template <std::ptrdiff_t static_dim>
        struct layout_data_offset<layout_type::row_major, static_dim>
        {
            using self_type = layout_data_offset<layout_type::row_major, static_dim>;

            // Single remaining index: skip the multiplication on the last axis.
            template <std::size_t dim, class S, class Arg>
            inline static auto run(const S& strides, Arg arg) noexcept
            {
                if (std::ptrdiff_t(dim) + 1 == static_dim)
                {
                    return arg;
                }
                else
                {
                    return arg * strides[dim];
                }
            }

            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                return arg * strides[dim] + self_type::template run<dim + 1>(strides, args...);
            }
        };

        // Column-major shortcut: the first axis (dim == 0) has stride 1,
        // so the first index is added unscaled.
        template <std::ptrdiff_t static_dim>
        struct layout_data_offset<layout_type::column_major, static_dim>
        {
            using self_type = layout_data_offset<layout_type::column_major, static_dim>;

            template <std::size_t dim, class S, class Arg>
            inline static auto run(const S& strides, Arg arg) noexcept
            {
                if (dim == 0)
                {
                    return arg;
                }
                else
                {
                    return arg * strides[dim];
                }
            }

            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                if (dim == 0)
                {
                    return arg + self_type::template run<dim + 1>(strides, args...);
                }
                else
                {
                    return arg * strides[dim] + self_type::template run<dim + 1>(strides, args...);
                }
            }
        };
    }
445
446 template <class offset_type, class S>
447 inline offset_type data_offset(const S&) noexcept
448 {
449 return offset_type(0);
450 }
451
452 template <class offset_type, class S, class Arg, class... Args>
453 inline offset_type data_offset(const S& strides, Arg arg, Args... args) noexcept
454 {
455 constexpr std::size_t nargs = sizeof...(Args) + 1;
456 if (nargs == strides.size())
457 {
458 // Correct number of arguments: iterate
459 return static_cast<offset_type>(detail::raw_data_offset<0>(strides, arg, args...));
460 }
461 else if (nargs > strides.size())
462 {
463 // Too many arguments: drop the first
465 }
466 else if (detail::last_type_is_missing<Args...>)
467 {
468 // Too few arguments & last argument xt::missing: postfix index with zeros
469 return static_cast<offset_type>(detail::raw_data_offset<0>(strides, arg, args...));
470 }
471 else
472 {
473 // Too few arguments: right to left scalar product
474 auto view = strides.cend() - nargs;
475 return static_cast<offset_type>(detail::raw_data_offset<0>(view, arg, args...));
476 }
477 }
478
479 template <class offset_type, layout_type L, class S, class... Args>
480 inline offset_type unchecked_data_offset(const S& strides, Args... args) noexcept
481 {
482 return static_cast<offset_type>(
483 detail::layout_data_offset<L, static_dimension<S>::value>::template run<0>(strides.cbegin(), args...)
484 );
485 }
486
487 template <class offset_type, class S, class It>
488 inline offset_type element_offset(const S& strides, It first, It last) noexcept
489 {
490 using difference_type = typename std::iterator_traits<It>::difference_type;
491 auto size = static_cast<difference_type>(
492 (std::min)(static_cast<typename S::size_type>(std::distance(first, last)), strides.size())
493 );
494 return std::inner_product(last - size, last, strides.cend() - size, offset_type(0));
495 }
496
497 namespace detail
498 {
499 template <class shape_type, class strides_type, class bs_ptr>
500 inline void adapt_strides(
501 const shape_type& shape,
502 strides_type& strides,
503 bs_ptr backstrides,
504 typename strides_type::size_type i
505 ) noexcept
506 {
507 if (shape[i] == 1)
508 {
509 strides[i] = 0;
510 }
511 (*backstrides)[i] = strides[i] * std::ptrdiff_t(shape[i] - 1);
512 }
513
514 template <class shape_type, class strides_type>
515 inline void adapt_strides(
516 const shape_type& shape,
517 strides_type& strides,
518 std::nullptr_t,
519 typename strides_type::size_type i
520 ) noexcept
521 {
522 if (shape[i] == 1)
523 {
524 strides[i] = 0;
525 }
526 }
527
528 template <layout_type L, class shape_type, class strides_type, class bs_ptr>
529 inline std::size_t
530 compute_strides(const shape_type& shape, layout_type l, strides_type& strides, bs_ptr bs)
531 {
532 using strides_value_type = typename std::decay_t<strides_type>::value_type;
533 strides_value_type data_size = 1;
534
535#if defined(_MSC_VER) && (1931 <= _MSC_VER)
536 // Workaround MSVC compiler optimization bug, xtensor#2568
537 if (0 == shape.size())
538 {
539 return static_cast<std::size_t>(data_size);
540 }
541#endif
542
544 {
545 for (std::size_t i = shape.size(); i != 0; --i)
546 {
547 strides[i - 1] = data_size;
548 data_size = strides[i - 1] * static_cast<strides_value_type>(shape[i - 1]);
549 adapt_strides(shape, strides, bs, i - 1);
550 }
551 }
552 else
553 {
554 for (std::size_t i = 0; i < shape.size(); ++i)
555 {
556 strides[i] = data_size;
557 data_size = strides[i] * static_cast<strides_value_type>(shape[i]);
558 adapt_strides(shape, strides, bs, i);
559 }
560 }
561 return static_cast<std::size_t>(data_size);
562 }
563 }
564
565 template <layout_type L, class shape_type, class strides_type>
566 inline std::size_t compute_strides(const shape_type& shape, layout_type l, strides_type& strides)
567 {
568 return detail::compute_strides<L>(shape, l, strides, nullptr);
569 }
570
571 template <layout_type L, class shape_type, class strides_type, class backstrides_type>
572 inline std::size_t
573 compute_strides(const shape_type& shape, layout_type l, strides_type& strides, backstrides_type& backstrides)
574 {
575 return detail::compute_strides<L>(shape, l, strides, &backstrides);
576 }
577
578 template <class T1, class T2>
579 inline bool
580 stride_match_condition(const T1& stride, const T2& shape, const T1& data_size, bool zero_strides)
581 {
582 return (shape == T2(1) && stride == T1(0) && zero_strides) || (stride == data_size);
583 }
584
585 // zero_strides should be true when strides are set to 0 if the corresponding dimensions are 1
586 template <class shape_type, class strides_type>
587 inline bool
588 do_strides_match(const shape_type& shape, const strides_type& strides, layout_type l, bool zero_strides)
589 {
590 using value_type = typename strides_type::value_type;
591 value_type data_size = 1;
592 if (l == layout_type::row_major)
593 {
594 for (std::size_t i = strides.size(); i != 0; --i)
595 {
596 if (!stride_match_condition(strides[i - 1], shape[i - 1], data_size, zero_strides))
597 {
598 return false;
599 }
600 data_size *= static_cast<value_type>(shape[i - 1]);
601 }
602 return true;
603 }
604 else if (l == layout_type::column_major)
605 {
606 for (std::size_t i = 0; i < strides.size(); ++i)
607 {
608 if (!stride_match_condition(strides[i], shape[i], data_size, zero_strides))
609 {
610 return false;
611 }
612 data_size *= static_cast<value_type>(shape[i]);
613 }
614 return true;
615 }
616 else
617 {
618 return false;
619 }
620 }
621
622 template <class shape_type, class strides_type>
623 inline void adapt_strides(const shape_type& shape, strides_type& strides) noexcept
624 {
625 for (typename shape_type::size_type i = 0; i < shape.size(); ++i)
626 {
627 detail::adapt_strides(shape, strides, nullptr, i);
628 }
629 }
630
631 template <class shape_type, class strides_type, class backstrides_type>
632 inline void
633 adapt_strides(const shape_type& shape, strides_type& strides, backstrides_type& backstrides) noexcept
634 {
635 for (typename shape_type::size_type i = 0; i < shape.size(); ++i)
636 {
637 detail::adapt_strides(shape, strides, &backstrides, i);
638 }
639 }
640
641 namespace detail
642 {
643 template <class S>
644 inline S unravel_noexcept(typename S::value_type idx, const S& strides, layout_type l) noexcept
645 {
646 using value_type = typename S::value_type;
647 using size_type = typename S::size_type;
648 S result = xtl::make_sequence<S>(strides.size(), 0);
649 if (l == layout_type::row_major)
650 {
651 for (size_type i = 0; i < strides.size(); ++i)
652 {
653 value_type str = strides[i];
654 value_type quot = str != 0 ? idx / str : 0;
655 idx = str != 0 ? idx % str : idx;
656 result[i] = quot;
657 }
658 }
659 else
660 {
661 for (size_type i = strides.size(); i != 0; --i)
662 {
663 value_type str = strides[i - 1];
664 value_type quot = str != 0 ? idx / str : 0;
665 idx = str != 0 ? idx % str : idx;
666 result[i - 1] = quot;
667 }
668 }
669 return result;
670 }
671 }
672
673 template <class S>
674 inline S unravel_from_strides(typename S::value_type index, const S& strides, layout_type l)
675 {
677 {
678 XTENSOR_THROW(std::runtime_error, "unravel_index: dynamic layout not supported");
679 }
680 return detail::unravel_noexcept(index, strides, l);
681 }
682
683 template <class S, class T>
684 inline get_value_type_t<T> ravel_from_strides(const T& index, const S& strides)
685 {
686 return element_offset<get_value_type_t<T>>(strides, index.begin(), index.end());
687 }
688
689 template <class S>
690 inline get_strides_t<S> unravel_index(typename S::value_type index, const S& shape, layout_type l)
691 {
692 using strides_type = get_strides_t<S>;
693 using strides_value_type = typename strides_type::value_type;
694 strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
695 compute_strides(shape, l, strides);
696 return unravel_from_strides(static_cast<strides_value_type>(index), strides, l);
697 }
698
699 template <class S, class T>
700 inline std::vector<get_strides_t<S>> unravel_indices(const T& idx, const S& shape, layout_type l)
701 {
702 using strides_type = get_strides_t<S>;
703 using strides_value_type = typename strides_type::value_type;
704 strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
705 compute_strides(shape, l, strides);
706 std::vector<get_strides_t<S>> out(idx.size());
707 auto out_iter = out.begin();
708 auto idx_iter = idx.begin();
709 for (; out_iter != out.end(); ++out_iter, ++idx_iter)
710 {
711 *out_iter = unravel_from_strides(static_cast<strides_value_type>(*idx_iter), strides, l);
712 }
713 return out;
714 }
715
716 template <class S, class T>
717 inline get_value_type_t<T> ravel_index(const T& index, const S& shape, layout_type l)
718 {
719 using strides_type = get_strides_t<S>;
720 strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
721 compute_strides(shape, l, strides);
722 return ravel_from_strides(index, strides);
723 }
724
725 template <class S, class stype>
726 inline S uninitialized_shape(stype size)
727 {
728 using value_type = typename S::value_type;
729 using size_type = typename S::size_type;
730 return xtl::make_sequence<S>(static_cast<size_type>(size), std::numeric_limits<value_type>::max());
731 }
732
    /**
     * Broadcasts `input` into `output` in place (NumPy-style, aligned on
     * trailing dimensions). `output` entries may be the max() sentinel
     * (fresh shape), 1, or an already-broadcast extent.
     * @return true when the broadcast is trivial (shapes already match)
     * @throws via throw_broadcast_error when the shapes are incompatible
     */
    template <class S1, class S2>
    inline bool broadcast_shape(const S1& input, S2& output)
    {
        bool trivial_broadcast = (input.size() == output.size());
        // Indices are faster than reverse iterators
        using value_type = typename S2::value_type;
        auto output_index = output.size();
        auto input_index = input.size();

        if (output_index < input_index)
        {
            // output cannot have fewer dimensions than input.
            throw_broadcast_error(output, input);
        }
        // Walk both shapes from the trailing dimension.
        for (; input_index != 0; --input_index, --output_index)
        {
            // First case: output = (MAX, MAX, ...., MAX)
            // output is a new shape that has not been through
            // the broadcast process yet; broadcast is trivial
            if (output[output_index - 1] == std::numeric_limits<value_type>::max())
            {
                output[output_index - 1] = static_cast<value_type>(input[input_index - 1]);
            }
            // Second case: output has been initialized to 1. Broadcast is trivial
            // only if input is 1 too.
            else if (output[output_index - 1] == 1)
            {
                output[output_index - 1] = static_cast<value_type>(input[input_index - 1]);
                trivial_broadcast = trivial_broadcast && (input[input_index - 1] == 1);
            }
            // Third case: output has been initialized to something different from 1.
            // if input is 1, then the broadcast is not trivial
            else if (input[input_index - 1] == 1)
            {
                trivial_broadcast = false;
            }
            // Last case: input and output must have the same value, else
            // shape are not compatible and an exception is thrown
            else if (static_cast<value_type>(input[input_index - 1]) != output[output_index - 1])
            {
                throw_broadcast_error(output, input);
            }
        }
        return trivial_broadcast;
    }
777
778 template <class S1, class S2>
779 inline bool broadcastable(const S1& src_shape, const S2& dst_shape)
780 {
781 auto src_iter = src_shape.crbegin();
782 auto dst_iter = dst_shape.crbegin();
783 bool res = dst_shape.size() >= src_shape.size();
784 for (; src_iter != src_shape.crend() && res; ++src_iter, ++dst_iter)
785 {
786 res = (static_cast<std::size_t>(*src_iter) == static_cast<std::size_t>(*dst_iter))
787 || (*src_iter == 1);
788 }
789 return res;
790 }
791
792 template <>
794 {
795 template <class S1, class S2>
796 static std::size_t get(const S1& s1, const S2& s2)
797 {
798 using value_type = typename S1::value_type;
799 // Indices are faster than reverse iterators
800 auto s1_index = s1.size();
801 auto s2_index = s2.size();
802
803 for (; s2_index != 0; --s1_index, --s2_index)
804 {
805 if (static_cast<value_type>(s1[s1_index - 1]) != static_cast<value_type>(s2[s2_index - 1]))
806 {
807 break;
808 }
809 }
810 return s1_index;
811 }
812 };
813
814 template <>
816 {
817 template <class S1, class S2>
818 static std::size_t get(const S1& s1, const S2& s2)
819 {
820 // Indices are faster than reverse iterators
821 using size_type = typename S1::size_type;
822 using value_type = typename S1::value_type;
823 size_type index = 0;
824
825 // This check is necessary as column major "broadcasting" is still
826 // performed in a row major fashion
827 if (s1.size() != s2.size())
828 {
829 return 0;
830 }
831
832 auto size = s2.size();
833
834 for (; index < size; ++index)
835 {
836 if (static_cast<value_type>(s1[index]) != static_cast<value_type>(s2[index]))
837 {
838 break;
839 }
840 }
841 return index;
842 }
843 };
844
    namespace detail
    {
        // Recursion terminator: all indices checked, none out of bounds.
        template <class S, std::size_t dim>
        inline bool check_in_bounds_impl(const S&)
        {
            return true;
        }

        // Terminator for a trailing xt::missing placeholder: always in bounds.
        template <class S, std::size_t dim>
        inline bool check_in_bounds_impl(const S&, missing_type)
        {
            return true;
        }

        // Checks `arg` against [0, shape[dim]) and recurses on the rest.
        template <class S, std::size_t dim, class T, class... Args>
        inline bool check_in_bounds_impl(const S& shape, T& arg, Args&... args)
        {
            if (sizeof...(Args) + 1 > shape.size())
            {
                // More indices than dimensions: drop the leading ones,
                // keeping dim so the remaining args line up with the shape.
                return check_in_bounds_impl<S, dim>(shape, args...);
            }
            else
            {
                return arg >= T(0) && arg < static_cast<T>(shape[dim])
                       && check_in_bounds_impl<S, dim + 1>(shape, args...);
            }
        }
    }
873
    /**
     * Check if the index is within the bounds of the array, without
     * throwing. Extra leading indices are ignored.
     * @return true when every (trailing) index lies in [0, shape[d])
     */
    template <class S, class... Args>
    inline bool check_in_bounds(const S& shape, Args&... args)
    {
        return detail::check_in_bounds_impl<S, 0>(shape, args...);
    }
879
    namespace detail
    {
        // Recursion terminator: no indices left to normalize.
        template <class S, std::size_t dim>
        inline void normalize_periodic_impl(const S&)
        {
        }

        // Terminator for a trailing xt::missing placeholder: nothing to do.
        template <class S, std::size_t dim>
        inline void normalize_periodic_impl(const S&, missing_type)
        {
        }

        // Wraps `arg` into [0, shape[dim]) in place, then recurses.
        template <class S, std::size_t dim, class T, class... Args>
        inline void normalize_periodic_impl(const S& shape, T& arg, Args&... args)
        {
            if (sizeof...(Args) + 1 > shape.size())
            {
                // More indices than dimensions: skip the leading ones.
                normalize_periodic_impl<S, dim>(shape, args...);
            }
            else
            {
                T n = static_cast<T>(shape[dim]);
                // (n + arg % n) % n maps negative indices correctly.
                arg = (n + (arg % n)) % n;
                normalize_periodic_impl<S, dim + 1>(shape, args...);
            }
        }
    }
907
    /**
     * Normalise an index of a periodic array: each index is wrapped into
     * [0, shape[d]) in place, so negative indices count from the end.
     * @param shape the shape of the array
     * @param args the indices to normalize (modified in place)
     */
    template <class S, class... Args>
    inline void normalize_periodic(const S& shape, Args&... args)
    {
        // Validate the number of indices before mutating any of them.
        check_dimension(shape, args...);
        detail::normalize_periodic_impl<S, 0>(shape, args...);
    }
914}
915
916#endif
auto arg(E &&e) noexcept
Calculates the phase angle (in radians) elementwise for the complex numbers in e.
Definition xcomplex.hpp:221
std::size_t compute_strides(const shape_type &shape, layout_type l, strides_type &strides)
Compute the strides given the shape and the layout of an array.
Definition xstrides.hpp:566
auto strides(const E &e, stride_type type=stride_type::normal) noexcept
Get strides of an object.
Definition xstrides.hpp:248
stride_type
Choose stride type.
Definition xstrides.hpp:234
void normalize_periodic(const S &shape, Args &... args)
Normalise an index of a periodic array.
Definition xstrides.hpp:909
@ bytes
Normal stride in bytes.
@ internal
As used internally (with stride(axis) == 0 if shape(axis) == 1)
@ normal
Normal stride corresponding to storage.
standard mathematical functions for xexpressions
bool in_bounds(const S &shape, Args &... args)
Check if the index is within the bounds of the array.
layout_type
Definition xlayout.hpp:24
auto view(E &&e, S &&... slices)
Constructs and returns a view on the specified xexpression.
Definition xview.hpp:1834