xtensor
 
Loading...
Searching...
No Matches
xstrides.hpp
1/***************************************************************************
2 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht *
3 * Copyright (c) QuantStack *
4 * *
5 * Distributed under the terms of the BSD 3-Clause License. *
6 * *
7 * The full license is in the file LICENSE, distributed with this software. *
8 ****************************************************************************/
9
10#ifndef XTENSOR_STRIDES_HPP
11#define XTENSOR_STRIDES_HPP
12
13#include <cstddef>
14#include <functional>
15#include <limits>
16#include <numeric>
17
18#include <xtl/xsequence.hpp>
19
20#include "../core/xshape.hpp"
21#include "../core/xtensor_config.hpp"
22#include "../core/xtensor_forward.hpp"
23#include "../utils/xexception.hpp"
24
25namespace xt
26{
27
28 template <class shape_type>
29 std::size_t compute_size(const shape_type& shape) noexcept;
30
34
35 /***************
36 * data offset *
37 ***************/
38
39 template <class offset_type, class S>
40 offset_type data_offset(const S& strides) noexcept;
41
66 template <class offset_type, class S, class Arg, class... Args>
67 offset_type data_offset(const S& strides, Arg arg, Args... args) noexcept;
68
69 template <class offset_type, layout_type L = layout_type::dynamic, class S, class... Args>
70 offset_type unchecked_data_offset(const S& strides, Args... args) noexcept;
71
72 template <class offset_type, class S, class It>
73 offset_type element_offset(const S& strides, It first, It last) noexcept;
74
75 /*******************
76 * strides builder *
77 *******************/
78
88 template <layout_type L = layout_type::dynamic, class shape_type, class strides_type>
89 std::size_t compute_strides(const shape_type& shape, layout_type l, strides_type& strides);
90
91 template <layout_type L = layout_type::dynamic, class shape_type, class strides_type, class backstrides_type>
92 std::size_t
93 compute_strides(const shape_type& shape, layout_type l, strides_type& strides, backstrides_type& backstrides);
94
95 template <class shape_type, class strides_type>
96 void adapt_strides(const shape_type& shape, strides_type& strides) noexcept;
97
98 template <class shape_type, class strides_type, class backstrides_type>
99 void adapt_strides(const shape_type& shape, strides_type& strides, backstrides_type& backstrides) noexcept;
100
101 /*****************
102 * unravel_index *
103 *****************/
104
105 template <class S>
106 S unravel_from_strides(typename S::value_type index, const S& strides, layout_type l = layout_type::row_major);
107
108 template <class S>
109 get_strides_t<S>
110 unravel_index(typename S::value_type index, const S& shape, layout_type l = layout_type::row_major);
111
112 template <class S, class T>
113 std::vector<get_strides_t<S>>
114 unravel_indices(const T& indices, const S& shape, layout_type l = layout_type::row_major);
115
116 /***********************
117 * broadcast functions *
118 ***********************/
119
120 template <class S, class size_type>
121 S uninitialized_shape(size_type size);
122
123 template <class S1, class S2>
124 bool broadcast_shape(const S1& input, S2& output);
125
126 template <class S1, class S2>
127 bool broadcastable(const S1& s1, S2& s2);
128
129 /*************************
130 * check strides overlap *
131 *************************/
132
133 template <layout_type L>
135
136 /**********************************
137 * check bounds, without throwing *
138 **********************************/
139
148 template <class S, class... Args>
149 bool in_bounds(const S& shape, Args&... args);
150
151 /********************************
152 * apply periodicity to indices *
153 *******************************/
154
163 template <class S, class... Args>
164 void normalize_periodic(const S& shape, Args&... args);
165
166 /********************************************
167 * utility functions for strided containers *
168 ********************************************/
169
    /**
     * Past-the-end iterator of the strided data of a container.
     *
     * @param c container exposing dimension(), strides() and shape()
     * @param begin iterator to the first element of the underlying storage
     * @param l layout used to decide which stride closes the range
     * @param offset data offset of the container (only consulted for the
     *        non-row-major branch)
     * @return iterator one past the last addressable element
     */
    template <class C, class It, class size_type>
    It strided_data_end(const C& c, It begin, layout_type l, size_type offset)
    {
        using difference_type = typename std::iterator_traits<It>::difference_type;
        if (c.dimension() == 0)
        {
            // 0-d container holds exactly one element.
            ++begin;
        }
        else
        {
            // Advance to the last addressable element: sum of
            // stride * (extent - 1) over all dimensions.
            for (std::size_t i = 0; i != c.dimension(); ++i)
            {
                begin += c.strides()[i] * difference_type(c.shape()[i] - 1);
            }
            if (l == layout_type::row_major)
            {
                // One more step along the innermost dimension.
                begin += c.strides().back();
            }
            else
            {
                // Non-row-major: step along the first dimension, but only for
                // a zero offset — presumably to keep the end inside the
                // underlying buffer for offset views (TODO confirm intent).
                if (offset == 0)
                {
                    begin += c.strides().front();
                }
            }
        }
        return begin;
    }
198
199 /***********
200 * strides *
201 ***********/
202
203 namespace detail
204 {
205 template <class return_type, class S, class T, class D>
206 inline return_type compute_stride_impl(layout_type layout, const S& shape, T axis, D default_stride)
207 {
208 if (layout == layout_type::row_major)
209 {
210 return std::accumulate(
211 shape.cbegin() + axis + 1,
212 shape.cend(),
213 static_cast<return_type>(1),
214 std::multiplies<return_type>()
215 );
216 }
217 if (layout == layout_type::column_major)
218 {
219 return std::accumulate(
220 shape.cbegin(),
221 shape.cbegin() + axis,
222 static_cast<return_type>(1),
223 std::multiplies<return_type>()
224 );
225 }
226 return default_stride;
227 }
228 }
229
234 enum class stride_type
235 {
237 normal = 1,
238 bytes = 2,
239 };
240
    /**
     * @brief Get strides of an object.
     *
     * @param e object exposing strides(), shape(), layout() and value_type
     * @param type stride flavour to return (internal / normal / bytes)
     * @return the strides of @p e, as E::strides_type
     */
    template <class E>
    inline auto strides(const E& e, stride_type type = stride_type::normal) noexcept
    {
        using strides_type = typename E::strides_type;
        using return_type = typename strides_type::value_type;
        strides_type ret = e.strides();
        auto shape = e.shape();

        // Internal strides are returned untouched (extent-1 axes keep
        // whatever stride the container stores, typically 0).
        if (type == stride_type::internal)
        {
            return ret;
        }

        // Recompute the "natural" stride for extent-1 axes, which the
        // container may store as 0 for broadcasting.
        for (std::size_t i = 0; i < ret.size(); ++i)
        {
            if (shape[i] == 1)
            {
                ret[i] = detail::compute_stride_impl<return_type>(e.layout(), shape, i, ret[i]);
            }
        }

        // Byte strides: scale every entry by the element size.
        if (type == stride_type::bytes)
        {
            return_type f = static_cast<return_type>(sizeof(typename E::value_type));
            std::for_each(
                ret.begin(),
                ret.end(),
                [f](auto& c)
                {
                    c *= f;
                }
            );
        }

        return ret;
    }
285
    /**
     * @brief Get stride of an object along a single axis.
     *
     * @param e object exposing strides(), shape(), layout() and value_type
     * @param axis the axis whose stride is requested
     * @param type stride flavour to return (internal / normal / bytes)
     * @return the stride of @p e along @p axis
     */
    template <class E>
    inline auto strides(const E& e, std::size_t axis, stride_type type = stride_type::normal) noexcept
    {
        using strides_type = typename E::strides_type;
        using return_type = typename strides_type::value_type;

        return_type ret = e.strides()[axis];

        // Internal stride: return as stored (0 for broadcast axes).
        if (type == stride_type::internal)
        {
            return ret;
        }

        // A stored stride of 0 on an extent-1 axis is replaced by the
        // layout's natural stride for that axis.
        if (ret == 0)
        {
            if (e.shape(axis) == 1)
            {
                ret = detail::compute_stride_impl<return_type>(e.layout(), e.shape(), axis, ret);
            }
        }

        // Byte strides: scale by the element size.
        if (type == stride_type::bytes)
        {
            return_type f = static_cast<return_type>(sizeof(typename E::value_type));
            ret *= f;
        }

        return ret;
    }
324
325 /******************
326 * Implementation *
327 ******************/
328
329 namespace detail
330 {
331 template <class shape_type>
332 inline std::size_t compute_size_impl(const shape_type& shape, std::true_type /* is signed */)
333 {
334 using size_type = std::decay_t<typename shape_type::value_type>;
335 return static_cast<std::size_t>(std::abs(
336 std::accumulate(shape.cbegin(), shape.cend(), size_type(1), std::multiplies<size_type>())
337 ));
338 }
339
340 template <class shape_type>
341 inline std::size_t compute_size_impl(const shape_type& shape, std::false_type /* is not signed */)
342 {
343 using size_type = std::decay_t<typename shape_type::value_type>;
344 return static_cast<std::size_t>(
345 std::accumulate(shape.cbegin(), shape.cend(), size_type(1), std::multiplies<size_type>())
346 );
347 }
348 }
349
    /**
     * Number of elements described by @p shape: the product of its extents.
     * Dispatches on the signedness of the shape's value type so that signed
     * shapes go through the absolute-value overload.
     */
    template <class shape_type>
    inline std::size_t compute_size(const shape_type& shape) noexcept
    {
        return detail::compute_size_impl(
            shape,
            xtl::is_signed<std::decay_t<typename std::decay_t<shape_type>::value_type>>()
        );
    }
358
    namespace detail
    {

        // Terminal case: no indices left contributes no offset.
        template <std::size_t dim, class S>
        inline auto raw_data_offset(const S&) noexcept
        {
            using strides_value_type = std::decay_t<decltype(std::declval<S>()[0])>;
            return strides_value_type(0);
        }

        // Terminal case when the last argument is the xt::missing placeholder:
        // the remaining dimensions are treated as index 0.
        template <std::size_t dim, class S>
        inline auto raw_data_offset(const S&, missing_type) noexcept
        {
            using strides_value_type = std::decay_t<decltype(std::declval<S>()[0])>;
            return strides_value_type(0);
        }

        // Scalar product of indices with strides, one dimension per recursion
        // step, starting at dimension `dim`.
        template <std::size_t dim, class S, class Arg, class... Args>
        inline auto raw_data_offset(const S& strides, Arg arg, Args... args) noexcept
        {
            return static_cast<std::ptrdiff_t>(arg) * strides[dim] + raw_data_offset<dim + 1>(strides, args...);
        }

        // Generic layout: no assumption can be made about any stride value,
        // so fall back to the plain scalar product.
        template <layout_type L, std::ptrdiff_t static_dim>
        struct layout_data_offset
        {
            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                return raw_data_offset<dim>(strides, arg, args...);
            }
        };

        // Row-major specialization: when the compile-time dimension count is
        // known, the last stride can be skipped (multiplication elided).
        template <std::ptrdiff_t static_dim>
        struct layout_data_offset<layout_type::row_major, static_dim>
        {
            using self_type = layout_data_offset<layout_type::row_major, static_dim>;

            template <std::size_t dim, class S, class Arg>
            inline static auto run(const S& strides, Arg arg) noexcept
            {
                // Innermost dimension of a statically-known row-major array:
                // presumably the stride is 1, so the index is the offset
                // (TODO confirm against the containers that instantiate this).
                if (std::ptrdiff_t(dim) + 1 == static_dim)
                {
                    return arg;
                }
                else
                {
                    return arg * strides[dim];
                }
            }

            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                return arg * strides[dim] + self_type::template run<dim + 1>(strides, args...);
            }
        };

        // Column-major specialization: the first dimension's stride is
        // skipped instead of the last.
        template <std::ptrdiff_t static_dim>
        struct layout_data_offset<layout_type::column_major, static_dim>
        {
            using self_type = layout_data_offset<layout_type::column_major, static_dim>;

            template <std::size_t dim, class S, class Arg>
            inline static auto run(const S& strides, Arg arg) noexcept
            {
                if (dim == 0)
                {
                    return arg;
                }
                else
                {
                    return arg * strides[dim];
                }
            }

            template <std::size_t dim, class S, class Arg, class... Args>
            inline static auto run(const S& strides, Arg arg, Args... args) noexcept
            {
                if (dim == 0)
                {
                    return arg + self_type::template run<dim + 1>(strides, args...);
                }
                else
                {
                    return arg * strides[dim] + self_type::template run<dim + 1>(strides, args...);
                }
            }
        };
    }
449
450 template <class offset_type, class S>
451 inline offset_type data_offset(const S&) noexcept
452 {
453 return offset_type(0);
454 }
455
    /**
     * Offset of the element at (arg, args...), adjusting for an index whose
     * length differs from the number of dimensions (strides.size()).
     */
    template <class offset_type, class S, class Arg, class... Args>
    inline offset_type data_offset(const S& strides, Arg arg, Args... args) noexcept
    {
        constexpr std::size_t nargs = sizeof...(Args) + 1;
        if (nargs == strides.size())
        {
            // Correct number of arguments: iterate
            return static_cast<offset_type>(detail::raw_data_offset<0>(strides, arg, args...));
        }
        else if (nargs > strides.size())
        {
            // Too many arguments: drop the first
            return data_offset<offset_type, S>(strides, args...);
        }
        else if (detail::last_type_is_missing<Args...>)
        {
            // Too few arguments & last argument xt::missing: postfix index with zeros
            return static_cast<offset_type>(detail::raw_data_offset<0>(strides, arg, args...));
        }
        else
        {
            // Too few arguments: right to left scalar product
            // (align the given indices with the trailing strides).
            auto view = strides.cend() - nargs;
            return static_cast<offset_type>(detail::raw_data_offset<0>(view, arg, args...));
        }
    }
482
    /**
     * Offset of the element at (args...), without any adjustment of the
     * argument count. The layout L selects a specialized computation that
     * may elide one multiplication; the strides are accessed through
     * strides.cbegin(), indexed by dimension.
     */
    template <class offset_type, layout_type L, class S, class... Args>
    inline offset_type unchecked_data_offset(const S& strides, Args... args) noexcept
    {
        return static_cast<offset_type>(
            detail::layout_data_offset<L, static_dimension<S>::value>::template run<0>(strides.cbegin(), args...)
        );
    }
490
491 template <class offset_type, class S, class It>
492 inline offset_type element_offset(const S& strides, It first, It last) noexcept
493 {
494 using difference_type = typename std::iterator_traits<It>::difference_type;
495 auto size = static_cast<difference_type>(
496 (std::min)(static_cast<typename S::size_type>(std::distance(first, last)), strides.size())
497 );
498 return std::inner_product(last - size, last, strides.cend() - size, offset_type(0));
499 }
500
501 namespace detail
502 {
503 template <class shape_type, class strides_type, class bs_ptr>
504 inline void adapt_strides(
505 const shape_type& shape,
506 strides_type& strides,
507 bs_ptr backstrides,
508 typename strides_type::size_type i
509 ) noexcept
510 {
511 if (shape[i] == 1)
512 {
513 strides[i] = 0;
514 }
515 (*backstrides)[i] = strides[i] * std::ptrdiff_t(shape[i] - 1);
516 }
517
518 template <class shape_type, class strides_type>
519 inline void adapt_strides(
520 const shape_type& shape,
521 strides_type& strides,
522 std::nullptr_t,
523 typename strides_type::size_type i
524 ) noexcept
525 {
526 if (shape[i] == 1)
527 {
528 strides[i] = 0;
529 }
530 }
531
532 template <layout_type L, class shape_type, class strides_type, class bs_ptr>
533 inline std::size_t
534 compute_strides(const shape_type& shape, layout_type l, strides_type& strides, bs_ptr bs)
535 {
536 using strides_value_type = typename std::decay_t<strides_type>::value_type;
537 strides_value_type data_size = 1;
538
539#if defined(_MSC_VER) && (1931 <= _MSC_VER)
540 // Workaround MSVC compiler optimization bug, xtensor#2568
541 if (0 == shape.size())
542 {
543 return static_cast<std::size_t>(data_size);
544 }
545#endif
546
548 {
549 for (std::size_t i = shape.size(); i != 0; --i)
550 {
551 strides[i - 1] = data_size;
552 data_size = strides[i - 1] * static_cast<strides_value_type>(shape[i - 1]);
553 adapt_strides(shape, strides, bs, i - 1);
554 }
555 }
556 else
557 {
558 for (std::size_t i = 0; i < shape.size(); ++i)
559 {
560 strides[i] = data_size;
561 data_size = strides[i] * static_cast<strides_value_type>(shape[i]);
562 adapt_strides(shape, strides, bs, i);
563 }
564 }
565 return static_cast<std::size_t>(data_size);
566 }
567 }
568
    /**
     * @brief Compute the strides given the shape and the layout of an array.
     * @return the number of elements of the array.
     */
    template <layout_type L, class shape_type, class strides_type>
    inline std::size_t compute_strides(const shape_type& shape, layout_type l, strides_type& strides)
    {
        // nullptr selects the detail overload that skips backstrides.
        return detail::compute_strides<L>(shape, l, strides, nullptr);
    }
574
    /**
     * @brief Compute strides and backstrides from the shape and layout.
     * @return the number of elements of the array.
     */
    template <layout_type L, class shape_type, class strides_type, class backstrides_type>
    inline std::size_t
    compute_strides(const shape_type& shape, layout_type l, strides_type& strides, backstrides_type& backstrides)
    {
        return detail::compute_strides<L>(shape, l, strides, &backstrides);
    }
581
582 template <class T1, class T2>
583 inline bool
584 stride_match_condition(const T1& stride, const T2& shape, const T1& data_size, bool zero_strides)
585 {
586 return (shape == T2(1) && stride == T1(0) && zero_strides) || (stride == data_size);
587 }
588
589 // zero_strides should be true when strides are set to 0 if the corresponding dimensions are 1
590 template <class shape_type, class strides_type>
591 inline bool
592 do_strides_match(const shape_type& shape, const strides_type& strides, layout_type l, bool zero_strides)
593 {
594 using value_type = typename strides_type::value_type;
595 value_type data_size = 1;
596 if (l == layout_type::row_major)
597 {
598 for (std::size_t i = strides.size(); i != 0; --i)
599 {
600 if (!stride_match_condition(strides[i - 1], shape[i - 1], data_size, zero_strides))
601 {
602 return false;
603 }
604 data_size *= static_cast<value_type>(shape[i - 1]);
605 }
606 return true;
607 }
608 else if (l == layout_type::column_major)
609 {
610 for (std::size_t i = 0; i < strides.size(); ++i)
611 {
612 if (!stride_match_condition(strides[i], shape[i], data_size, zero_strides))
613 {
614 return false;
615 }
616 data_size *= static_cast<value_type>(shape[i]);
617 }
618 return true;
619 }
620 else
621 {
622 return false;
623 }
624 }
625
626 template <class shape_type, class strides_type>
627 inline void adapt_strides(const shape_type& shape, strides_type& strides) noexcept
628 {
629 for (typename shape_type::size_type i = 0; i < shape.size(); ++i)
630 {
631 detail::adapt_strides(shape, strides, nullptr, i);
632 }
633 }
634
635 template <class shape_type, class strides_type, class backstrides_type>
636 inline void
637 adapt_strides(const shape_type& shape, strides_type& strides, backstrides_type& backstrides) noexcept
638 {
639 for (typename shape_type::size_type i = 0; i < shape.size(); ++i)
640 {
641 detail::adapt_strides(shape, strides, &backstrides, i);
642 }
643 }
644
    namespace detail
    {
        // Convert a flat index into a per-dimension index using the given
        // strides. Dimensions are processed from largest to smallest stride
        // (front-to-back for row-major, back-to-front otherwise); zero
        // strides (broadcast axes) contribute index 0 and leave idx intact.
        template <class S>
        inline S unravel_noexcept(typename S::value_type idx, const S& strides, layout_type l) noexcept
        {
            using value_type = typename S::value_type;
            using size_type = typename S::size_type;
            S result = xtl::make_sequence<S>(strides.size(), 0);
            if (l == layout_type::row_major)
            {
                for (size_type i = 0; i < strides.size(); ++i)
                {
                    value_type str = strides[i];
                    // quot must be computed before idx is reduced.
                    value_type quot = str != 0 ? idx / str : 0;
                    idx = str != 0 ? idx % str : idx;
                    result[i] = quot;
                }
            }
            else
            {
                for (size_type i = strides.size(); i != 0; --i)
                {
                    value_type str = strides[i - 1];
                    value_type quot = str != 0 ? idx / str : 0;
                    idx = str != 0 ? idx % str : idx;
                    result[i - 1] = quot;
                }
            }
            return result;
        }
    }
676
677 template <class S>
678 inline S unravel_from_strides(typename S::value_type index, const S& strides, layout_type l)
679 {
681 {
682 XTENSOR_THROW(std::runtime_error, "unravel_index: dynamic layout not supported");
683 }
684 return detail::unravel_noexcept(index, strides, l);
685 }
686
    /**
     * Flatten the multi-dimensional index @p index (any iterable) into a
     * single offset, using the given strides.
     */
    template <class S, class T>
    inline get_value_type_t<T> ravel_from_strides(const T& index, const S& strides)
    {
        return element_offset<get_value_type_t<T>>(strides, index.begin(), index.end());
    }
692
    /**
     * Unravel a flat @p index into a multi-dimensional index for @p shape,
     * assuming layout @p l. Strides are computed on the fly from the shape.
     */
    template <class S>
    inline get_strides_t<S> unravel_index(typename S::value_type index, const S& shape, layout_type l)
    {
        using strides_type = get_strides_t<S>;
        using strides_value_type = typename strides_type::value_type;
        strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
        compute_strides(shape, l, strides);
        return unravel_from_strides(static_cast<strides_value_type>(index), strides, l);
    }
702
703 template <class S, class T>
704 inline std::vector<get_strides_t<S>> unravel_indices(const T& idx, const S& shape, layout_type l)
705 {
706 using strides_type = get_strides_t<S>;
707 using strides_value_type = typename strides_type::value_type;
708 strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
709 compute_strides(shape, l, strides);
710 std::vector<get_strides_t<S>> out(idx.size());
711 auto out_iter = out.begin();
712 auto idx_iter = idx.begin();
713 for (; out_iter != out.end(); ++out_iter, ++idx_iter)
714 {
715 *out_iter = unravel_from_strides(static_cast<strides_value_type>(*idx_iter), strides, l);
716 }
717 return out;
718 }
719
    /**
     * Flatten the multi-dimensional index @p index into a single offset for
     * an array of the given @p shape and layout @p l.
     */
    template <class S, class T>
    inline get_value_type_t<T> ravel_index(const T& index, const S& shape, layout_type l)
    {
        using strides_type = get_strides_t<S>;
        strides_type strides = xtl::make_sequence<strides_type>(shape.size(), 0);
        compute_strides(shape, l, strides);
        return ravel_from_strides(index, strides);
    }
728
    /**
     * Build a shape of @p size dimensions where every extent is the maximum
     * representable value — a sentinel recognized by broadcast_shape as
     * "not yet broadcast".
     */
    template <class S, class stype>
    inline S uninitialized_shape(stype size)
    {
        using value_type = typename S::value_type;
        using size_type = typename S::size_type;
        return xtl::make_sequence<S>(static_cast<size_type>(size), std::numeric_limits<value_type>::max());
    }
736
    /**
     * Broadcast @p input into @p output (right-aligned, NumPy rules),
     * mutating @p output in place.
     *
     * @return true when the broadcast is trivial (shapes already match
     *         element-wise), false otherwise.
     * @throws a broadcast error (via throw_broadcast_error) when the shapes
     *         are incompatible or output has fewer dimensions than input.
     */
    template <class S1, class S2>
    inline bool broadcast_shape(const S1& input, S2& output)
    {
        bool trivial_broadcast = (input.size() == output.size());
        // Indices are faster than reverse iterators
        using value_type = typename S2::value_type;
        auto output_index = output.size();
        auto input_index = input.size();

        if (output_index < input_index)
        {
            throw_broadcast_error(output, input);
        }
        for (; input_index != 0; --input_index, --output_index)
        {
            // First case: output = (MAX, MAX, ...., MAX)
            // output is a new shape that has not been through
            // the broadcast process yet; broadcast is trivial
            if (output[output_index - 1] == std::numeric_limits<value_type>::max())
            {
                output[output_index - 1] = static_cast<value_type>(input[input_index - 1]);
            }
            // Second case: output has been initialized to 1. Broadcast is trivial
            // only if input is 1 too.
            else if (output[output_index - 1] == 1)
            {
                output[output_index - 1] = static_cast<value_type>(input[input_index - 1]);
                trivial_broadcast = trivial_broadcast && (input[input_index - 1] == 1);
            }
            // Third case: output has been initialized to something different from 1.
            // if input is 1, then the broadcast is not trivial
            else if (input[input_index - 1] == 1)
            {
                trivial_broadcast = false;
            }
            // Last case: input and output must have the same value, else
            // shapes are not compatible and an exception is thrown
            else if (static_cast<value_type>(input[input_index - 1]) != output[output_index - 1])
            {
                throw_broadcast_error(output, input);
            }
        }
        return trivial_broadcast;
    }
781
782 template <class S1, class S2>
783 inline bool broadcastable(const S1& src_shape, const S2& dst_shape)
784 {
785 auto src_iter = src_shape.crbegin();
786 auto dst_iter = dst_shape.crbegin();
787 bool res = dst_shape.size() >= src_shape.size();
788 for (; src_iter != src_shape.crend() && res; ++src_iter, ++dst_iter)
789 {
790 res = (static_cast<std::size_t>(*src_iter) == static_cast<std::size_t>(*dst_iter))
791 || (*src_iter == 1);
792 }
793 return res;
794 }
795
796 template <>
798 {
799 template <class S1, class S2>
800 static std::size_t get(const S1& s1, const S2& s2)
801 {
802 using value_type = typename S1::value_type;
803 // Indices are faster than reverse iterators
804 auto s1_index = s1.size();
805 auto s2_index = s2.size();
806
807 for (; s2_index != 0; --s1_index, --s2_index)
808 {
809 if (static_cast<value_type>(s1[s1_index - 1]) != static_cast<value_type>(s2[s2_index - 1]))
810 {
811 break;
812 }
813 }
814 return s1_index;
815 }
816 };
817
818 template <>
820 {
821 template <class S1, class S2>
822 static std::size_t get(const S1& s1, const S2& s2)
823 {
824 // Indices are faster than reverse iterators
825 using size_type = typename S1::size_type;
826 using value_type = typename S1::value_type;
827 size_type index = 0;
828
829 // This check is necessary as column major "broadcasting" is still
830 // performed in a row major fashion
831 if (s1.size() != s2.size())
832 {
833 return 0;
834 }
835
836 auto size = s2.size();
837
838 for (; index < size; ++index)
839 {
840 if (static_cast<value_type>(s1[index]) != static_cast<value_type>(s2[index]))
841 {
842 break;
843 }
844 }
845 return index;
846 }
847 };
848
    namespace detail
    {
        // Terminal case: no indices left, everything checked so far was valid.
        template <class S, std::size_t dim>
        inline bool check_in_bounds_impl(const S&)
        {
            return true;
        }

        // A trailing xt::missing placeholder is always in bounds.
        template <class S, std::size_t dim>
        inline bool check_in_bounds_impl(const S&, missing_type)
        {
            return true;
        }

        // Recursive case: check arg against dimension `dim`, then recurse.
        template <class S, std::size_t dim, class T, class... Args>
        inline bool check_in_bounds_impl(const S& shape, T& arg, Args&... args)
        {
            if (sizeof...(Args) + 1 > shape.size())
            {
                // More indices than dimensions: drop the leading ones,
                // mirroring the data_offset convention.
                return check_in_bounds_impl<S, dim>(shape, args...);
            }
            else
            {
                return arg >= T(0) && arg < static_cast<T>(shape[dim])
                       && check_in_bounds_impl<S, dim + 1>(shape, args...);
            }
        }
    }
877
    /**
     * Check, without throwing, whether the index (args...) lies within the
     * bounds of @p shape. Excess leading indices are ignored; a trailing
     * xt::missing is accepted.
     */
    template <class S, class... Args>
    inline bool check_in_bounds(const S& shape, Args&... args)
    {
        return detail::check_in_bounds_impl<S, 0>(shape, args...);
    }
883
    namespace detail
    {
        // Terminal case: no indices left to normalize.
        template <class S, std::size_t dim>
        inline void normalize_periodic_impl(const S&)
        {
        }

        // A trailing xt::missing placeholder needs no normalization.
        template <class S, std::size_t dim>
        inline void normalize_periodic_impl(const S&, missing_type)
        {
        }

        // Recursive case: wrap arg into [0, shape[dim]) in place, then recurse.
        template <class S, std::size_t dim, class T, class... Args>
        inline void normalize_periodic_impl(const S& shape, T& arg, Args&... args)
        {
            if (sizeof...(Args) + 1 > shape.size())
            {
                // More indices than dimensions: skip the leading ones,
                // mirroring the data_offset convention.
                normalize_periodic_impl<S, dim>(shape, args...);
            }
            else
            {
                T n = static_cast<T>(shape[dim]);
                // Maps any (possibly negative) arg into [0, n).
                arg = (n + (arg % n)) % n;
                normalize_periodic_impl<S, dim + 1>(shape, args...);
            }
        }
    }
911
    /**
     * Normalize the index (args...) of a periodic array in place: each entry
     * is wrapped into [0, shape[dim]). The dimension count is validated first
     * via check_dimension.
     */
    template <class S, class... Args>
    inline void normalize_periodic(const S& shape, Args&... args)
    {
        check_dimension(shape, args...);
        detail::normalize_periodic_impl<S, 0>(shape, args...);
    }
918}
919
920#endif
auto arg(E &&e) noexcept
Calculates the phase angle (in radians) elementwise for the complex numbers in e.
Definition xcomplex.hpp:221
std::size_t compute_strides(const shape_type &shape, layout_type l, strides_type &strides)
Compute the strides given the shape and the layout of an array.
Definition xstrides.hpp:570
auto strides(const E &e, stride_type type=stride_type::normal) noexcept
Get strides of an object.
Definition xstrides.hpp:250
stride_type
Choose stride type.
Definition xstrides.hpp:235
void normalize_periodic(const S &shape, Args &... args)
Normalise an index of a periodic array.
Definition xstrides.hpp:913
@ bytes
Normal stride in bytes.
Definition xstrides.hpp:238
@ internal
As used internally (with stride(axis) == 0 if shape(axis) == 1)
Definition xstrides.hpp:236
@ normal
Normal stride corresponding to storage.
Definition xstrides.hpp:237
standard mathematical functions for xexpressions
bool in_bounds(const S &shape, Args &... args)
Check if the index is within the bounds of the array.
layout_type
Definition xlayout.hpp:24
auto view(E &&e, S &&... slices)
Constructs and returns a view on the specified xexpression.
Definition xview.hpp:1824