xtensor — xchunked_array.hpp (text extracted from Doxygen-generated documentation; the "Loading...", "Searching...", "No Matches" page furniture has been removed)
/***************************************************************************
 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht          *
 * Copyright (c) QuantStack                                                 *
 *                                                                          *
 * Distributed under the terms of the BSD 3-Clause License.                 *
 *                                                                          *
 * The full license is in the file LICENSE, distributed with this software. *
 ****************************************************************************/
9
10#ifndef XTENSOR_CHUNKED_ARRAY_HPP
11#define XTENSOR_CHUNKED_ARRAY_HPP
12
13#include <array>
14#include <vector>
15
16#include "xarray.hpp"
17#include "xchunked_assign.hpp"
18
19namespace xt
20{
// NOTE(review): this region is a Doxygen text extraction; the leading numbers
// on each line are artifacts of the original source's line numbering. The
// lines that declared the specialized struct names (original lines 36 and 47,
// presumably `struct xcontainer_inner_types<xchunked_array<chunk_storage>>`
// and `struct xiterable_inner_types<xchunked_array<chunk_storage>>`) were
// dropped by the extraction -- TODO recover them from the upstream header.
28 /******************************
29 * xchunked_array declaration *
30 ******************************/
31
32 template <class chunk_storage>
33 class xchunked_array;
34
// Inner-type traits: element access on a chunked array goes through an
// individual chunk, so reference types come from the chunk type.
35 template <class chunk_storage>
37 {
38 using chunk_type = typename chunk_storage::value_type;
39 using const_reference = typename chunk_type::const_reference;
40 using reference = typename chunk_type::reference;
41 using size_type = std::size_t;
42 using storage_type = chunk_type;
44 };
45
// Iterable traits: the inner shape type is the shape type of a single chunk.
46 template <class chunk_storage>
48 {
49 using chunk_type = typename chunk_storage::value_type;
50 using inner_shape_type = typename chunk_type::shape_type;
53 };
54
// An in-memory or on-disk array split into equally shaped chunks held in a
// `chunk_storage` container. Element access first locates the owning chunk,
// then the position inside it.
// NOTE(review): the Doxygen extraction dropped several original lines inside
// this declaration (e.g. 67-69: iterable_base/inner_types aliases, 72:
// presumably chunk_iterator typedefs, 88/103/110: the constructors' names) --
// recover them from the upstream header before editing this block.
55 template <class chunk_storage>
56 class xchunked_array : public xaccessible<xchunked_array<chunk_storage>>,
57 public xiterable<xchunked_array<chunk_storage>>,
58 public xchunked_semantic<xchunked_array<chunk_storage>>
59 {
60 public:
61
62 using chunk_storage_type = chunk_storage;
63 using chunk_type = typename chunk_storage::value_type;
64 using grid_shape_type = typename chunk_storage::shape_type;
65 using const_reference = typename chunk_type::const_reference;
66 using reference = typename chunk_type::reference;
70 using const_stepper = typename iterable_base::const_stepper;
71 using stepper = typename iterable_base::stepper;
73 using size_type = typename inner_types::size_type;
74 using storage_type = typename inner_types::storage_type;
75 using value_type = typename storage_type::value_type;
76 using pointer = value_type*;
77 using const_pointer = const value_type*;
78 using difference_type = std::ptrdiff_t;
79 using shape_type = typename chunk_type::shape_type;
80 using temporary_type = typename inner_types::temporary_type;
// Layout is dynamic and storage is never contiguous: elements live in
// separately allocated chunks.
82 static constexpr layout_type static_layout = layout_type::dynamic;
83 static constexpr bool contiguous_layout = false;
86
// Constructor from chunk storage, overall shape and chunk shape.
87 template <class S>
89 chunk_storage_type&& chunks,
90 S&& shape,
91 S&& chunk_shape,
92 layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
93 );
94 ~xchunked_array() = default;
95
96 xchunked_array(const xchunked_array&) = default;
97 xchunked_array& operator=(const xchunked_array&) = default;
98
99 xchunked_array(xchunked_array&&) = default;
100 xchunked_array& operator=(xchunked_array&&) = default;
101
// Constructor from an expression; chunk shape deduced via detail::chunk_helper.
102 template <class E>
104 const xexpression<E>& e,
105 chunk_storage_type&& chunks,
106 layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
107 );
108
// Constructor from an expression with an explicit chunk shape.
109 template <class E, class S>
111 const xexpression<E>& e,
112 chunk_storage_type&& chunks,
113 S&& chunk_shape,
114 layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
115 );
116
// Extended copy semantics: assign any xexpression.
117 template <class E>
118 xchunked_array& operator=(const xexpression<E>& e);
119
120 size_type dimension() const noexcept;
121 const shape_type& shape() const noexcept;
122 layout_type layout() const noexcept;
123 bool is_contiguous() const noexcept;
124
// Unchecked multi-index element access.
125 template <class... Idxs>
126 reference operator()(Idxs... idxs);
127
128 template <class... Idxs>
129 const_reference operator()(Idxs... idxs) const;
130
// Element access through an iterator range over the index.
131 template <class It>
132 reference element(It first, It last);
133
134 template <class It>
135 const_reference element(It first, It last) const;
136
137 template <class S>
138 bool broadcast_shape(S& s, bool reuse_cache = false) const;
139
140 template <class S>
141 bool has_linear_assign(const S& strides) const noexcept;
142
// Stepper API used by xiterable.
143 template <class S>
144 stepper stepper_begin(const S& shape) noexcept;
145 template <class S>
146 stepper stepper_end(const S& shape, layout_type) noexcept;
147
148 template <class S>
149 const_stepper stepper_begin(const S& shape) const noexcept;
150 template <class S>
151 const_stepper stepper_end(const S& shape, layout_type) const noexcept;
152
// Chunk-level introspection: shape of one chunk, chunk count, chunk grid shape.
153 const shape_type& chunk_shape() const noexcept;
154 size_type grid_size() const noexcept;
155 const grid_shape_type& grid_shape() const noexcept;
156
157 chunk_storage_type& chunks();
158 const chunk_storage_type& chunks() const;
159
// Iteration over whole chunks rather than individual elements.
160 chunk_iterator chunk_begin();
161 chunk_iterator chunk_end();
162
163 const_chunk_iterator chunk_begin() const;
164 const_chunk_iterator chunk_end() const;
165 const_chunk_iterator chunk_cbegin() const;
166 const_chunk_iterator chunk_cend() const;
167
168 private:
169
// (chunk indices, in-chunk indices) for a statically known number of dims.
170 template <class... Idxs>
171 using indexes_type = std::
172 pair<std::array<std::size_t, sizeof...(Idxs)>, std::array<std::size_t, sizeof...(Idxs)>>;
173
// One (chunk index, in-chunk index) pair per dimension.
174 template <class... Idxs>
175 using chunk_indexes_type = std::array<std::pair<std::size_t, std::size_t>, sizeof...(Idxs)>;
176
177 template <std::size_t N>
178 using static_indexes_type = std::pair<std::array<std::size_t, N>, std::array<std::size_t, N>>;
179
180 using dynamic_indexes_type = std::pair<std::vector<std::size_t>, std::vector<std::size_t>>;
181
182 template <class S1, class S2>
183 void resize(S1&& shape, S2&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);
184
185 template <class... Idxs>
186 indexes_type<Idxs...> get_indexes(Idxs... idxs) const;
187
188 template <class Idx>
189 std::pair<std::size_t, std::size_t> get_chunk_indexes_in_dimension(std::size_t dim, Idx idx) const;
190
191 template <std::size_t... dims, class... Idxs>
192 chunk_indexes_type<Idxs...> get_chunk_indexes(std::index_sequence<dims...>, Idxs... idxs) const;
193
194 template <class T, std::size_t N>
195 static_indexes_type<N> unpack(const std::array<T, N>& arr) const;
196
197 template <class It>
198 dynamic_indexes_type get_indexes_dynamic(It first, It last) const;
199
200 shape_type m_shape;
201 shape_type m_chunk_shape;
202 chunk_storage_type m_chunks;
203 };
204
// Compile-time query: does expression type E provide chunked semantics?
205 template <class E>
206 constexpr bool is_chunked(const xexpression<E>& e);
207
208 template <class E>
209 constexpr bool is_chunked();
210
// Factory functions creating in-memory chunked arrays.
// NOTE(review): the Doxygen extraction dropped original lines 211-227,
// 229, 233, 239-254, 256 and 259-273, 275 here (documentation blocks and
// the functions' return-type lines, presumably
// `xchunked_array<xarray<xarray<T>>>`). Recover from the upstream header.
228 template <class T, layout_type L = XTENSOR_DEFAULT_LAYOUT, class S>
230 chunked_array(S&& shape, S&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);
231
232 template <class T, layout_type L = XTENSOR_DEFAULT_LAYOUT, class S>
234 std::initializer_list<S> shape,
235 std::initializer_list<S> chunk_shape,
236 layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
237 );
238
255 template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class E, class S>
257 chunked_array(const xexpression<E>& e, S&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);
258
274 template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class E>
276 chunked_array(const xexpression<E>& e, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);
277
278 /*******************************
279 * chunk_helper implementation *
280 *******************************/
281
282 namespace detail
283 {
284 // Workaround for VS2015
285 template <class E>
286 using try_chunk_shape = decltype(std::declval<E>().chunk_shape());
287
288 template <class E, template <class> class OP, class = void>
289 struct chunk_helper_impl
290 {
291 using is_chunked = std::false_type;
292
293 static const auto& chunk_shape(const xexpression<E>& e)
294 {
295 return e.derived_cast().shape();
296 }
297
298 template <class S1, class S2>
299 static void
300 resize(E& chunks, const S1& container_shape, const S2& chunk_shape, layout_type chunk_memory_layout)
301 {
302 chunks.resize(container_shape);
303 for (auto& c : chunks)
304 {
305 c.resize(chunk_shape, chunk_memory_layout);
306 }
307 }
308 };
309
310 template <class E, template <class> class OP>
311 struct chunk_helper_impl<E, OP, void_t<OP<E>>>
312 {
313 using is_chunked = std::true_type;
314
315 static const auto& chunk_shape(const xexpression<E>& e)
316 {
317 return e.derived_cast().chunk_shape();
318 }
319
320 template <class S1, class S2>
321 static void
322 resize(E& chunks, const S1& container_shape, const S2& /*chunk_shape*/, layout_type /*chunk_memory_layout*/)
323 {
324 chunks.resize(container_shape);
325 }
326 };
327
328 template <class E>
329 using chunk_helper = chunk_helper_impl<E, try_chunk_shape>;
330 }
331
332 template <class E>
333 constexpr bool is_chunked(const xexpression<E>&)
334 {
335 return is_chunked<E>();
336 }
337
338 template <class E>
339 constexpr bool is_chunked()
340 {
341 using return_type = typename detail::chunk_helper<E>::is_chunked;
342 return return_type::value;
343 }
344
// Creates an in-memory chunked array with the given shape and chunk shape.
// NOTE(review): the Doxygen extraction dropped original lines 347, 349-351
// and 354 here -- presumably the `chunked_array(...)` signature line and the
// head of the `return xchunked_array<xarray<xarray<T>>>(...)` statement.
// Recover them from the upstream header.
345 template <class T, layout_type L, class S>
346 inline xchunked_array<xarray<xarray<T>>>
348 {
352 std::forward<S>(shape),
353 std::forward<S>(chunk_shape),
355 );
356 }
357
358 template <class T, layout_type L, class S>
359 xchunked_array<xarray<xarray<T>>>
360 chunked_array(std::initializer_list<S> shape, std::initializer_list<S> chunk_shape, layout_type chunk_memory_layout)
361 {
362 using sh_type = std::vector<std::size_t>;
363 auto sh = xtl::forward_sequence<sh_type, std::initializer_list<S>>(shape);
364 auto ch_sh = xtl::forward_sequence<sh_type, std::initializer_list<S>>(chunk_shape);
365 return chunked_array<T, L, sh_type>(std::move(sh), std::move(ch_sh), chunk_memory_layout);
366 }
367
// Factory overloads building a chunked array from an expression, with and
// without an explicit chunk shape.
// NOTE(review): the function bodies (original lines 370-374 and 378-382)
// were dropped by the Doxygen extraction; only the template heads and return
// types remain. Recover them from the upstream header.
368 template <layout_type L, class E, class S>
369 inline xchunked_array<xarray<xarray<typename E::value_type>>>
375
376 template <layout_type L, class E>
377 inline xchunked_array<xarray<xarray<typename E::value_type>>>
383
384 /*********************************
385 * xchunked_array implementation *
386 *********************************/
387
388 template <class CS>
389 template <class S>
390 inline xchunked_array<CS>::xchunked_array(CS&& chunks, S&& shape, S&& chunk_shape, layout_type chunk_memory_layout)
391 : m_chunks(std::move(chunks))
392 {
393 resize(std::forward<S>(shape), std::forward<S>(chunk_shape), chunk_memory_layout);
394 }
395
396 template <class CS>
397 template <class E>
398 inline xchunked_array<CS>::xchunked_array(const xexpression<E>& e, CS&& chunks, layout_type chunk_memory_layout)
399 : xchunked_array(e, std::move(chunks), detail::chunk_helper<E>::chunk_shape(e), chunk_memory_layout)
400 {
401 }
402
403 template <class CS>
404 template <class E, class S>
405 inline xchunked_array<CS>::xchunked_array(
406 const xexpression<E>& e,
407 CS&& chunks,
408 S&& chunk_shape,
409 layout_type chunk_memory_layout
410 )
411 : m_chunks(std::move(chunks))
412 {
413 resize(e.derived_cast().shape(), std::forward<S>(chunk_shape), chunk_memory_layout);
414 semantic_base::assign_xexpression(e);
415 }
416
417 template <class CS>
418 template <class E>
419 inline auto xchunked_array<CS>::operator=(const xexpression<E>& e) -> self_type&
420 {
421 return semantic_base::operator=(e);
422 }
423
424 template <class CS>
425 inline auto xchunked_array<CS>::dimension() const noexcept -> size_type
426 {
427 return m_shape.size();
428 }
429
430 template <class CS>
431 inline auto xchunked_array<CS>::shape() const noexcept -> const shape_type&
432 {
433 return m_shape;
434 }
435
436 template <class CS>
437 inline auto xchunked_array<CS>::layout() const noexcept -> layout_type
438 {
439 return static_layout;
440 }
441
442 template <class CS>
443 inline bool xchunked_array<CS>::is_contiguous() const noexcept
444 {
445 return false;
446 }
447
448 template <class CS>
449 template <class... Idxs>
450 inline auto xchunked_array<CS>::operator()(Idxs... idxs) -> reference
451 {
452 auto ii = get_indexes(idxs...);
453 auto& chunk = m_chunks.element(ii.first.cbegin(), ii.first.cend());
454 return chunk.element(ii.second.cbegin(), ii.second.cend());
455 }
456
457 template <class CS>
458 template <class... Idxs>
459 inline auto xchunked_array<CS>::operator()(Idxs... idxs) const -> const_reference
460 {
461 auto ii = get_indexes(idxs...);
462 auto& chunk = m_chunks.element(ii.first.cbegin(), ii.first.cend());
463 return chunk.element(ii.second.cbegin(), ii.second.cend());
464 }
465
466 template <class CS>
467 template <class It>
468 inline auto xchunked_array<CS>::element(It first, It last) -> reference
469 {
470 auto ii = get_indexes_dynamic(first, last);
471 auto& chunk = m_chunks.element(ii.first.begin(), ii.first.end());
472 return chunk.element(ii.second.begin(), ii.second.end());
473 }
474
475 template <class CS>
476 template <class It>
477 inline auto xchunked_array<CS>::element(It first, It last) const -> const_reference
478 {
479 auto ii = get_indexes_dynamic(first, last);
480 auto& chunk = m_chunks.element(ii.first.begin(), ii.first.end());
481 return chunk.element(ii.second.begin(), ii.second.end());
482 }
483
484 template <class CS>
485 template <class S>
486 inline bool xchunked_array<CS>::broadcast_shape(S& s, bool) const
487 {
488 return xt::broadcast_shape(shape(), s);
489 }
490
491 template <class CS>
492 template <class S>
493 inline bool xchunked_array<CS>::has_linear_assign(const S&) const noexcept
494 {
495 return false;
496 }
497
498 template <class CS>
499 template <class S>
500 inline auto xchunked_array<CS>::stepper_begin(const S& shape) noexcept -> stepper
501 {
502 size_type offset = shape.size() - this->dimension();
503 return stepper(this, offset);
504 }
505
506 template <class CS>
507 template <class S>
508 inline auto xchunked_array<CS>::stepper_end(const S& shape, layout_type) noexcept -> stepper
509 {
510 size_type offset = shape.size() - this->dimension();
511 return stepper(this, offset, true);
512 }
513
514 template <class CS>
515 template <class S>
516 inline auto xchunked_array<CS>::stepper_begin(const S& shape) const noexcept -> const_stepper
517 {
518 size_type offset = shape.size() - this->dimension();
519 return const_stepper(this, offset);
520 }
521
522 template <class CS>
523 template <class S>
524 inline auto xchunked_array<CS>::stepper_end(const S& shape, layout_type) const noexcept -> const_stepper
525 {
526 size_type offset = shape.size() - this->dimension();
527 return const_stepper(this, offset, true);
528 }
529
530 template <class CS>
531 inline auto xchunked_array<CS>::chunk_shape() const noexcept -> const shape_type&
532 {
533 return m_chunk_shape;
534 }
535
536 template <class CS>
537 inline auto xchunked_array<CS>::grid_size() const noexcept -> size_type
538 {
539 return m_chunks.size();
540 }
541
542 template <class CS>
543 inline auto xchunked_array<CS>::grid_shape() const noexcept -> const grid_shape_type&
544 {
545 return m_chunks.shape();
546 }
547
548 template <class CS>
549 inline auto xchunked_array<CS>::chunks() -> chunk_storage_type&
550 {
551 return m_chunks;
552 }
553
554 template <class CS>
555 inline auto xchunked_array<CS>::chunks() const -> const chunk_storage_type&
556 {
557 return m_chunks;
558 }
559
560 template <class CS>
561 inline auto xchunked_array<CS>::chunk_begin() -> chunk_iterator
562 {
563 shape_type chunk_index(m_shape.size(), size_type(0));
564 return chunk_iterator(*this, std::move(chunk_index), 0u);
565 }
566
567 template <class CS>
568 inline auto xchunked_array<CS>::chunk_end() -> chunk_iterator
569 {
570 shape_type sh = xtl::forward_sequence<shape_type, const grid_shape_type>(grid_shape());
571 return chunk_iterator(*this, std::move(sh), grid_size());
572 }
573
574 template <class CS>
575 inline auto xchunked_array<CS>::chunk_begin() const -> const_chunk_iterator
576 {
577 shape_type chunk_index(m_shape.size(), size_type(0));
578 return const_chunk_iterator(*this, std::move(chunk_index), 0u);
579 }
580
581 template <class CS>
582 inline auto xchunked_array<CS>::chunk_end() const -> const_chunk_iterator
583 {
584 shape_type sh = xtl::forward_sequence<shape_type, const grid_shape_type>(grid_shape());
585 return const_chunk_iterator(*this, std::move(sh), grid_size());
586 }
587
588 template <class CS>
589 inline auto xchunked_array<CS>::chunk_cbegin() const -> const_chunk_iterator
590 {
591 return chunk_begin();
592 }
593
594 template <class CS>
595 inline auto xchunked_array<CS>::chunk_cend() const -> const_chunk_iterator
596 {
597 return chunk_end();
598 }
599
600 template <class CS>
601 template <class S1, class S2>
602 inline void xchunked_array<CS>::resize(S1&& shape, S2&& chunk_shape, layout_type chunk_memory_layout)
603 {
604 // compute chunk number in each dimension (shape_of_chunks)
605 std::vector<std::size_t> shape_of_chunks(shape.size());
606 std::transform(
607 shape.cbegin(),
608 shape.cend(),
609 chunk_shape.cbegin(),
610 shape_of_chunks.begin(),
611 [](auto s, auto cs)
612 {
613 std::size_t cn = s / cs;
614 if (s % cs > 0)
615 {
616 cn += std::size_t(1); // edge_chunk
617 }
618 return cn;
619 }
620 );
621
622 detail::chunk_helper<CS>::resize(m_chunks, shape_of_chunks, chunk_shape, chunk_memory_layout);
623
624 m_shape = xtl::forward_sequence<shape_type, S1>(shape);
625 m_chunk_shape = xtl::forward_sequence<shape_type, S2>(chunk_shape);
626 }
627
628 template <class CS>
629 template <class... Idxs>
630 inline auto xchunked_array<CS>::get_indexes(Idxs... idxs) const -> indexes_type<Idxs...>
631 {
632 auto chunk_indexes_packed = get_chunk_indexes(std::make_index_sequence<sizeof...(Idxs)>(), idxs...);
633 return unpack(chunk_indexes_packed);
634 }
635
636 template <class CS>
637 template <class Idx>
638 inline std::pair<std::size_t, std::size_t>
639 xchunked_array<CS>::get_chunk_indexes_in_dimension(std::size_t dim, Idx idx) const
640 {
641 std::size_t index_of_chunk = static_cast<size_t>(idx) / m_chunk_shape[dim];
642 std::size_t index_in_chunk = static_cast<size_t>(idx) - index_of_chunk * m_chunk_shape[dim];
643 return std::make_pair(index_of_chunk, index_in_chunk);
644 }
645
646 template <class CS>
647 template <std::size_t... dims, class... Idxs>
648 inline auto xchunked_array<CS>::get_chunk_indexes(std::index_sequence<dims...>, Idxs... idxs) const
649 -> chunk_indexes_type<Idxs...>
650 {
651 chunk_indexes_type<Idxs...> chunk_indexes = {{get_chunk_indexes_in_dimension(dims, idxs)...}};
652 return chunk_indexes;
653 }
654
655 template <class CS>
656 template <class T, std::size_t N>
657 inline auto xchunked_array<CS>::unpack(const std::array<T, N>& arr) const -> static_indexes_type<N>
658 {
659 std::array<std::size_t, N> arr0;
660 std::array<std::size_t, N> arr1;
661 for (std::size_t i = 0; i < N; ++i)
662 {
663 arr0[i] = std::get<0>(arr[i]);
664 arr1[i] = std::get<1>(arr[i]);
665 }
666 return std::make_pair(arr0, arr1);
667 }
668
669 template <class CS>
670 template <class It>
671 inline auto xchunked_array<CS>::get_indexes_dynamic(It first, It last) const -> dynamic_indexes_type
672 {
673 auto size = static_cast<std::size_t>(std::distance(first, last));
674 std::vector<std::size_t> indexes_of_chunk(size);
675 std::vector<std::size_t> indexes_in_chunk(size);
676 for (std::size_t dim = 0; dim < size; ++dim)
677 {
678 auto chunk_index = get_chunk_indexes_in_dimension(dim, *first++);
679 indexes_of_chunk[dim] = chunk_index.first;
680 indexes_in_chunk[dim] = chunk_index.second;
681 }
682 return std::make_pair(indexes_of_chunk, indexes_in_chunk);
683 }
684}
685
686#endif
Cross-reference notes (artifacts of the Doxygen extraction, presumably describing the base classes and free functions used above — verify against the generated documentation):
- xaccessible: base class for implementation of common expression access methods.
- xconst_iterable: base class for multidimensional iterable constant expressions (xiterable.hpp:37).
- xiterable: base class for multidimensional iterable expressions.
- chunked_array(S&& shape, S&& chunk_shape, layout_type chunk_memory_layout = xt::layout_type::row_major) -> xchunked_array<xarray<xarray<T>>>: creates an in-memory chunked array.
- strides(const E& e, stride_type type = stride_type::normal) noexcept: gets the strides of an object (xstrides.hpp:248).
- xexpression: standard mathematical functions for xexpressions.
- layout_type: defined in xlayout.hpp:24.