10#ifndef XTENSOR_CHUNKED_ARRAY_HPP
11#define XTENSOR_CHUNKED_ARRAY_HPP
17#include "xchunked_assign.hpp"
// NOTE(review): extraction-garbled fragment — original line numbers are fused
// into the text and interior lines (struct names, braces) are missing.
// Code kept byte-identical; comments only.
// Forward declaration of xchunked_array.
32 template <
class chunk_storage>
// xcontainer_inner_types specialization for xchunked_array: maps the chunk
// storage's value type (one chunk) to the container's reference/storage types.
35 template <
class chunk_storage>
38 using chunk_type =
typename chunk_storage::value_type;
39 using const_reference =
typename chunk_type::const_reference;
40 using reference =
typename chunk_type::reference;
41 using size_type = std::size_t;
42 using storage_type = chunk_type;
// xiterable_inner_types specialization: the inner shape is a single chunk's
// shape type.
46 template <
class chunk_storage>
49 using chunk_type =
typename chunk_storage::value_type;
50 using inner_shape_type =
typename chunk_type::shape_type;
// NOTE(review): extraction-garbled declaration of class xchunked_array — many
// interior lines (class head, access specifiers, braces) were dropped by the
// extraction. Code kept byte-identical; comments only.
55 template <
class chunk_storage>
57 public xiterable<xchunked_array<chunk_storage>>,
// Type aliases: a chunk is one element of the chunk storage; the grid shape
// indexes chunks, the (per-chunk) shape indexes elements inside a chunk.
63 using chunk_type =
typename chunk_storage::value_type;
64 using grid_shape_type =
typename chunk_storage::shape_type;
65 using const_reference =
typename chunk_type::const_reference;
66 using reference =
typename chunk_type::reference;
70 using const_stepper =
typename iterable_base::const_stepper;
71 using stepper =
typename iterable_base::stepper;
73 using size_type =
typename inner_types::size_type;
74 using storage_type =
typename inner_types::storage_type;
75 using value_type =
typename storage_type::value_type;
76 using pointer = value_type*;
77 using const_pointer =
const value_type*;
78 using difference_type = std::ptrdiff_t;
79 using shape_type =
typename chunk_type::shape_type;
80 using temporary_type =
typename inner_types::temporary_type;
// A chunked array is never contiguous in memory.
83 static constexpr bool contiguous_layout =
false;
// Constructors: from chunk storage + shapes, and from an expression
// (with or without an explicit chunk shape).
89 chunk_storage_type&& chunks,
105 chunk_storage_type&& chunks,
109 template <
class E,
class S>
112 chunk_storage_type&& chunks,
// Shape / layout queries.
120 size_type dimension()
const noexcept;
121 const shape_type& shape()
const noexcept;
123 bool is_contiguous()
const noexcept;
// Variadic element access (mutable and const overloads).
125 template <
class...
Idxs>
128 template <
class...
Idxs>
129 const_reference operator()(
Idxs...
idxs)
const;
141 bool has_linear_assign(
const S&
strides)
const noexcept;
// Stepper interface required by xiterable.
144 stepper stepper_begin(
const S& shape)
noexcept;
146 stepper stepper_end(
const S& shape,
layout_type)
noexcept;
149 const_stepper stepper_begin(
const S& shape)
const noexcept;
151 const_stepper stepper_end(
const S& shape,
layout_type)
const noexcept;
// Chunk-grid queries and access to the underlying chunk storage.
153 const shape_type& chunk_shape()
const noexcept;
154 size_type grid_size()
const noexcept;
155 const grid_shape_type& grid_shape()
const noexcept;
157 chunk_storage_type& chunks();
158 const chunk_storage_type& chunks()
const;
// Private helpers: index-splitting machinery. indexes_type pairs the
// per-dimension chunk indexes with the in-chunk indexes.
170 template <
class...
Idxs>
171 using indexes_type = std::
172 pair<std::array<std::size_t,
sizeof...(Idxs)>, std::array<std::size_t,
sizeof...(
Idxs)>>;
174 template <
class...
Idxs>
175 using chunk_indexes_type = std::array<std::pair<std::size_t, std::size_t>,
sizeof...(Idxs)>;
177 template <std::
size_t N>
178 using static_indexes_type = std::pair<std::array<std::size_t, N>, std::array<std::size_t, N>>;
180 using dynamic_indexes_type = std::pair<std::vector<std::size_t>, std::vector<std::size_t>>;
182 template <
class S1,
class S2>
185 template <
class...
Idxs>
189 std::pair<std::size_t, std::size_t> get_chunk_indexes_in_dimension(std::size_t
dim,
Idx idx)
const;
191 template <std::size_t...
dims,
class...
Idxs>
192 chunk_indexes_type<
Idxs...> get_chunk_indexes(std::index_sequence<dims...>,
Idxs...
idxs)
const;
194 template <
class T, std::
size_t N>
198 dynamic_indexes_type get_indexes_dynamic(
It first,
It last)
const;
// Data members: the per-chunk shape and the chunk storage itself.
201 shape_type m_chunk_shape;
202 chunk_storage_type m_chunks;
// NOTE(review): extraction-garbled fragment — declarations of the is_chunked
// and chunked_array free functions; function-name lines are missing.
// Code kept byte-identical; comments only.
209 constexpr bool is_chunked();
// Factory declarations: in-memory chunked array from shape sequences,
// from initializer lists, and from an expression.
228 template <
class T, layout_type L = XTENSOR_DEFAULT_LAYOUT,
class S>
232 template <
class T, layout_type L = XTENSOR_DEFAULT_LAYOUT,
class S>
234 std::initializer_list<S> shape,
235 std::initializer_list<S> chunk_shape,
255 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class E,
class S>
274 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class E>
// Detection idiom probe: well-formed only when E exposes chunk_shape().
286 using try_chunk_shape =
decltype(std::declval<E>().chunk_shape());
// NOTE(review): extraction-garbled fragment of detail::chunk_helper_impl —
// braces and some signature lines are missing. Code kept byte-identical;
// comments only.
// Primary template: used when E is NOT chunked (no chunk_shape() member).
288 template <
class E,
template <
class>
class OP,
class =
void>
289 struct chunk_helper_impl
291 using is_chunked = std::false_type;
// For a non-chunked expression the "chunk shape" is its full shape.
295 return e.derived_cast().shape();
298 template <
class S1,
class S2>
// Resizes the chunk grid, then gives every chunk the requested shape/layout.
300 resize(E& chunks,
const S1& container_shape,
const S2& chunk_shape,
layout_type chunk_memory_layout)
302 chunks.resize(container_shape);
303 for (
auto& c : chunks)
305 c.resize(chunk_shape, chunk_memory_layout);
// Specialization selected (via void_t + OP = try_chunk_shape) when E IS
// chunked: forwards chunk_shape() and leaves individual chunks alone.
310 template <
class E,
template <
class>
class OP>
311 struct chunk_helper_impl<E, OP, void_t<OP<E>>>
313 using is_chunked = std::true_type;
315 static const auto& chunk_shape(
const xexpression<E>& e)
317 return e.derived_cast().chunk_shape();
320 template <
class S1,
class S2>
// Chunked storage manages its own chunk shapes: only the grid is resized.
322 resize(E& chunks,
const S1& container_shape,
const S2& ,
layout_type )
324 chunks.resize(container_shape);
329 using chunk_helper = chunk_helper_impl<E, try_chunk_shape>;
333 constexpr bool is_chunked(
const xexpression<E>&)
335 return is_chunked<E>();
339 constexpr bool is_chunked()
341 using return_type =
typename detail::chunk_helper<E>::is_chunked;
342 return return_type::value;
// NOTE(review): extraction-garbled fragment of the sequence-based
// chunked_array factory — the function-name line and the constructor call are
// missing, so the code is kept byte-identical; comments only.
345 template <
class T, layout_type L,
class S>
346 inline xchunked_array<xarray<xarray<T>>>
// Shapes are perfectly forwarded into the xchunked_array constructor.
352 std::forward<S>(shape),
353 std::forward<S>(chunk_shape),
358 template <
class T, layout_type L,
class S>
359 xchunked_array<xarray<xarray<T>>>
360 chunked_array(std::initializer_list<S> shape, std::initializer_list<S> chunk_shape,
layout_type chunk_memory_layout)
362 using sh_type = std::vector<std::size_t>;
363 auto sh = xtl::forward_sequence<sh_type, std::initializer_list<S>>(shape);
364 auto ch_sh = xtl::forward_sequence<sh_type, std::initializer_list<S>>(chunk_shape);
365 return chunked_array<T, L, sh_type>(std::move(sh), std::move(ch_sh), chunk_memory_layout);
// NOTE(review): extraction-garbled fragment — only the template heads and
// return types of the expression-based chunked_array overloads survive; the
// signatures and bodies are missing. Code kept byte-identical; comments only.
// Overload taking an expression plus an explicit chunk shape.
368 template <layout_type L,
class E,
class S>
369 inline xchunked_array<xarray<xarray<typename E::value_type>>>
// Overload taking only an expression (chunk shape deduced via chunk_helper).
376 template <layout_type L,
class E>
377 inline xchunked_array<xarray<xarray<typename E::value_type>>>
390 inline xchunked_array<CS>::xchunked_array(CS&& chunks, S&& shape, S&& chunk_shape,
layout_type chunk_memory_layout)
391 : m_chunks(std::move(chunks))
393 resize(std::forward<S>(shape), std::forward<S>(chunk_shape), chunk_memory_layout);
398 inline xchunked_array<CS>::xchunked_array(
const xexpression<E>& e, CS&& chunks, layout_type chunk_memory_layout)
399 : xchunked_array(e, std::move(chunks), detail::chunk_helper<E>::chunk_shape(e), chunk_memory_layout)
404 template <
class E,
class S>
405 inline xchunked_array<CS>::xchunked_array(
406 const xexpression<E>& e,
409 layout_type chunk_memory_layout
411 : m_chunks(std::move(chunks))
413 resize(e.derived_cast().shape(), std::forward<S>(chunk_shape), chunk_memory_layout);
414 semantic_base::assign_xexpression(e);
419 inline auto xchunked_array<CS>::operator=(
const xexpression<E>& e) -> self_type&
421 return semantic_base::operator=(e);
425 inline auto xchunked_array<CS>::dimension() const noexcept -> size_type
427 return m_shape.size();
431 inline auto xchunked_array<CS>::shape() const noexcept -> const shape_type&
437 inline auto xchunked_array<CS>::layout() const noexcept -> layout_type
439 return static_layout;
443 inline bool xchunked_array<CS>::is_contiguous() const noexcept
449 template <
class... Idxs>
450 inline auto xchunked_array<CS>::operator()(Idxs... idxs) -> reference
452 auto ii = get_indexes(idxs...);
453 auto& chunk = m_chunks.element(ii.first.cbegin(), ii.first.cend());
454 return chunk.element(ii.second.cbegin(), ii.second.cend());
458 template <
class... Idxs>
459 inline auto xchunked_array<CS>::operator()(Idxs... idxs)
const -> const_reference
461 auto ii = get_indexes(idxs...);
462 auto& chunk = m_chunks.element(ii.first.cbegin(), ii.first.cend());
463 return chunk.element(ii.second.cbegin(), ii.second.cend());
468 inline auto xchunked_array<CS>::element(It first, It last) -> reference
470 auto ii = get_indexes_dynamic(first, last);
471 auto& chunk = m_chunks.element(ii.first.begin(), ii.first.end());
472 return chunk.element(ii.second.begin(), ii.second.end());
477 inline auto xchunked_array<CS>::element(It first, It last)
const -> const_reference
479 auto ii = get_indexes_dynamic(first, last);
480 auto& chunk = m_chunks.element(ii.first.begin(), ii.first.end());
481 return chunk.element(ii.second.begin(), ii.second.end());
486 inline bool xchunked_array<CS>::broadcast_shape(S& s,
bool)
const
488 return xt::broadcast_shape(shape(), s);
493 inline bool xchunked_array<CS>::has_linear_assign(
const S&)
const noexcept
500 inline auto xchunked_array<CS>::stepper_begin(
const S& shape)
noexcept -> stepper
502 size_type offset = shape.size() - this->dimension();
503 return stepper(
this, offset);
508 inline auto xchunked_array<CS>::stepper_end(
const S& shape, layout_type)
noexcept -> stepper
510 size_type offset = shape.size() - this->dimension();
511 return stepper(
this, offset,
true);
516 inline auto xchunked_array<CS>::stepper_begin(
const S& shape)
const noexcept -> const_stepper
518 size_type offset = shape.size() - this->dimension();
519 return const_stepper(
this, offset);
524 inline auto xchunked_array<CS>::stepper_end(
const S& shape, layout_type)
const noexcept -> const_stepper
526 size_type offset = shape.size() - this->dimension();
527 return const_stepper(
this, offset,
true);
531 inline auto xchunked_array<CS>::chunk_shape() const noexcept -> const shape_type&
533 return m_chunk_shape;
537 inline auto xchunked_array<CS>::grid_size() const noexcept -> size_type
539 return m_chunks.size();
543 inline auto xchunked_array<CS>::grid_shape() const noexcept -> const grid_shape_type&
545 return m_chunks.shape();
549 inline auto xchunked_array<CS>::chunks() -> chunk_storage_type&
555 inline auto xchunked_array<CS>::chunks() const -> const chunk_storage_type&
561 inline auto xchunked_array<CS>::chunk_begin() -> chunk_iterator
563 shape_type chunk_index(m_shape.size(), size_type(0));
564 return chunk_iterator(*
this, std::move(chunk_index), 0u);
568 inline auto xchunked_array<CS>::chunk_end() -> chunk_iterator
570 shape_type sh = xtl::forward_sequence<shape_type, const grid_shape_type>(grid_shape());
571 return chunk_iterator(*
this, std::move(sh), grid_size());
575 inline auto xchunked_array<CS>::chunk_begin() const -> const_chunk_iterator
577 shape_type chunk_index(m_shape.size(), size_type(0));
578 return const_chunk_iterator(*
this, std::move(chunk_index), 0u);
582 inline auto xchunked_array<CS>::chunk_end() const -> const_chunk_iterator
584 shape_type sh = xtl::forward_sequence<shape_type, const grid_shape_type>(grid_shape());
585 return const_chunk_iterator(*
this, std::move(sh), grid_size());
589 inline auto xchunked_array<CS>::chunk_cbegin() const -> const_chunk_iterator
591 return chunk_begin();
595 inline auto xchunked_array<CS>::chunk_cend() const -> const_chunk_iterator
601 template <
class S1,
class S2>
602 inline void xchunked_array<CS>::resize(S1&& shape, S2&& chunk_shape, layout_type chunk_memory_layout)
605 std::vector<std::size_t> shape_of_chunks(shape.size());
609 chunk_shape.cbegin(),
610 shape_of_chunks.begin(),
613 std::size_t cn = s / cs;
616 cn += std::size_t(1);
622 detail::chunk_helper<CS>::resize(m_chunks, shape_of_chunks, chunk_shape, chunk_memory_layout);
624 m_shape = xtl::forward_sequence<shape_type, S1>(shape);
625 m_chunk_shape = xtl::forward_sequence<shape_type, S2>(chunk_shape);
629 template <
class... Idxs>
630 inline auto xchunked_array<CS>::get_indexes(Idxs... idxs)
const -> indexes_type<Idxs...>
632 auto chunk_indexes_packed = get_chunk_indexes(std::make_index_sequence<
sizeof...(Idxs)>(), idxs...);
633 return unpack(chunk_indexes_packed);
638 inline std::pair<std::size_t, std::size_t>
639 xchunked_array<CS>::get_chunk_indexes_in_dimension(std::size_t dim, Idx idx)
const
641 std::size_t index_of_chunk =
static_cast<size_t>(idx) / m_chunk_shape[dim];
642 std::size_t index_in_chunk =
static_cast<size_t>(idx) - index_of_chunk * m_chunk_shape[dim];
643 return std::make_pair(index_of_chunk, index_in_chunk);
647 template <std::size_t... dims,
class... Idxs>
648 inline auto xchunked_array<CS>::get_chunk_indexes(std::index_sequence<dims...>, Idxs... idxs)
const
649 -> chunk_indexes_type<Idxs...>
651 chunk_indexes_type<Idxs...> chunk_indexes = {{get_chunk_indexes_in_dimension(dims, idxs)...}};
652 return chunk_indexes;
656 template <
class T, std::
size_t N>
657 inline auto xchunked_array<CS>::unpack(
const std::array<T, N>& arr)
const -> static_indexes_type<N>
659 std::array<std::size_t, N> arr0;
660 std::array<std::size_t, N> arr1;
661 for (std::size_t i = 0; i < N; ++i)
663 arr0[i] = std::get<0>(arr[i]);
664 arr1[i] = std::get<1>(arr[i]);
666 return std::make_pair(arr0, arr1);
671 inline auto xchunked_array<CS>::get_indexes_dynamic(It first, It last)
const -> dynamic_indexes_type
673 auto size =
static_cast<std::size_t
>(std::distance(first, last));
674 std::vector<std::size_t> indexes_of_chunk(size);
675 std::vector<std::size_t> indexes_in_chunk(size);
676 for (std::size_t dim = 0; dim < size; ++dim)
678 auto chunk_index = get_chunk_indexes_in_dimension(dim, *first++);
679 indexes_of_chunk[dim] = chunk_index.first;
680 indexes_in_chunk[dim] = chunk_index.second;
682 return std::make_pair(indexes_of_chunk, indexes_in_chunk);
Base class for implementation of common expression access methods.
Base class for multidimensional iterable constant expressions.
Base class for multidimensional iterable expressions.
xchunked_array< xarray< xarray< T > > > chunked_array(S &&shape, S &&chunk_shape, layout_type chunk_memory_layout = ::xt::layout_type::row_major)
Creates an in-memory chunked array.
auto strides(const E &e, stride_type type=stride_type::normal) noexcept
Get strides of an object.
standard mathematical functions for xexpressions