#ifndef XTENSOR_CHUNKED_ASSIGN_HPP
#define XTENSOR_CHUNKED_ASSIGN_HPP

#include "xnoalias.hpp"
#include "xstrided_view.hpp"

namespace xt
{
    /*********************
     * xchunked_assigner *
     *********************/

    template <class T, class chunk_storage>
    class xchunked_assigner
    {
    public:

        using temporary_type = T;

        template <class E, class DST>
        void build_and_assign_temporary(const xexpression<E>& e, DST& dst);
    };

    /*********************
     * xchunked_semantic *
     *********************/

    template <class D>
    class xchunked_semantic : public xsemantic_base<D>
    {
    public:

        using base_type = xsemantic_base<D>;
        using derived_type = D;
        using temporary_type = typename base_type::temporary_type;

        template <class E>
        derived_type& assign_xexpression(const xexpression<E>& e);

        template <class E>
        derived_type& computed_assign(const xexpression<E>& e);

        template <class E, class F>
        derived_type& scalar_computed_assign(const E& e, F&& f);

    protected:

        xchunked_semantic() = default;
        ~xchunked_semantic() = default;

        template <class E>
        derived_type& operator=(const xexpression<E>& e);

    private:

        template <class CS>
        xchunked_assigner<temporary_type, CS> get_assigner(const CS& chunk_storage) const;
    };
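    // Usage sketch (hedged): the chunked_array factory below comes from
    // xchunked_array.hpp and is an assumption of this comment, not something
    // defined in this header. Assignment to a chunked container routes
    // through xchunked_semantic, which copies the right-hand side chunk by
    // chunk:
    //
    //     #include "xtensor/xchunked_array.hpp"
    //
    //     auto a = xt::chunked_array<double>({10, 10}, {4, 4});
    //     a = xt::arange<double>(100).reshape({10, 10});  // chunk-wise copy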
    template <class CS>
    class xchunked_array;

    template <class E>
    class xchunked_view;

    namespace detail
    {
        template <class T>
        struct is_xchunked_array : std::false_type
        {
        };

        template <class CS>
        struct is_xchunked_array<xchunked_array<CS>> : std::true_type
        {
        };

        template <class T>
        struct is_xchunked_view : std::false_type
        {
        };

        template <class E>
        struct is_xchunked_view<xchunked_view<E>> : std::true_type
        {
        };

        struct invalid_chunk_iterator
        {
        };

        template <class A>
        struct xchunk_iterator_array
        {
            using reference = decltype(*(std::declval<A>().chunks().begin()));

            reference get_chunk(A& arr, typename A::size_type i, const xstrided_slice_vector&) const
            {
                using difference_type = typename A::difference_type;
                return *(arr.chunks().begin() + static_cast<difference_type>(i));
            }
        };

        template <class V>
        struct xchunk_iterator_view
        {
            using reference = decltype(strided_view(
                std::declval<V>().expression(),
                std::declval<xstrided_slice_vector>()));

            reference get_chunk(V& view, typename V::size_type, const xstrided_slice_vector& slices) const
            {
                return strided_view(view.expression(), slices);
            }
        };

        template <class T>
        struct xchunk_iterator_base
            : std::conditional_t<
                  is_xchunked_array<std::decay_t<T>>::value,
                  xchunk_iterator_array<T>,
                  std::conditional_t<is_xchunked_view<std::decay_t<T>>::value, xchunk_iterator_view<T>, invalid_chunk_iterator>>
        {
        };
    }
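    // detail::xchunk_iterator_base selects the get_chunk backend at compile
    // time: a chunked array returns a reference into its chunk storage, while
    // a chunked view builds a strided_view on the underlying expression. Any
    // other type inherits invalid_chunk_iterator, so instantiating the
    // iterator machinery on a non-chunked type fails at compile time.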
    /*******************
     * xchunk_iterator *
     *******************/

    template <class E>
    class xchunk_iterator : private detail::xchunk_iterator_base<E>
    {
    public:

        using base_type = detail::xchunk_iterator_base<E>;
        using self_type = xchunk_iterator<E>;
        using size_type = typename E::size_type;
        using shape_type = typename E::shape_type;
        using slice_vector = xstrided_slice_vector;

        using reference = typename base_type::reference;
        using value_type = std::remove_reference_t<reference>;
        using pointer = value_type*;
        using difference_type = typename E::difference_type;
        using iterator_category = std::forward_iterator_tag;

        xchunk_iterator() = default;
        xchunk_iterator(E& expression, shape_type&& chunk_index, size_type chunk_linear_index);

        self_type& operator++();
        self_type operator++(int);
        decltype(auto) operator*() const;

        bool operator==(const self_type& other) const;
        bool operator!=(const self_type& other) const;

        const shape_type& chunk_index() const;

        const slice_vector& get_slice_vector() const;
        slice_vector get_chunk_slice_vector() const;

    private:

        void fill_slice_vector(size_type index);

        E* p_chunked_expression;
        shape_type m_chunk_index;
        size_type m_chunk_linear_index;
        slice_vector m_slice_vector;
    };
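    // Usage sketch (hedged): `a` is assumed to be a chunked expression
    // exposing chunk_begin()/chunk_end(), as used by assign_xexpression below.
    //
    //     for (auto it = a.chunk_begin(); it != a.chunk_end(); ++it)
    //     {
    //         auto chunk = *it;                    // current chunk (array) or
    //                                              // strided_view (view)
    //         const auto& idx = it.chunk_index();  // position in the chunk grid
    //         auto slices = it.get_slice_vector(); // region covered in the
    //                                              // whole expression
    //     }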
    /************************************
     * xchunked_assigner implementation *
     ************************************/

    template <class T, class CS>
    template <class E, class DST>
    inline void xchunked_assigner<T, CS>::build_and_assign_temporary(const xexpression<E>& e, DST& dst)
    {
        temporary_type tmp(e, CS(), dst.chunk_shape());
        dst = std::move(tmp);
    }

    /************************************
     * xchunked_semantic implementation *
     ************************************/
    template <class D>
    template <class E>
    inline auto xchunked_semantic<D>::assign_xexpression(const xexpression<E>& e) -> derived_type&
    {
        auto& d = this->derived_cast();
        const auto& chunk_shape = d.chunk_shape();
        size_t i = 0;
        auto it_end = d.chunk_end();
        for (auto it = d.chunk_begin(); it != it_end; ++it, ++i)
        {
            auto rhs = strided_view(e.derived_cast(), it.get_slice_vector());
            if (rhs.shape() != chunk_shape)
            {
                // Edge chunk: restrict the destination to the region actually
                // covered by the expression.
                noalias(strided_view(*it, it.get_chunk_slice_vector())) = rhs;
            }
            else
            {
                noalias(*it) = rhs;
            }
        }

        return this->derived_cast();
    }
    template <class D>
    template <class E>
    inline auto xchunked_semantic<D>::computed_assign(const xexpression<E>& e) -> derived_type&
    {
        D& d = this->derived_cast();
        if (e.derived_cast().dimension() > d.dimension() || e.derived_cast().shape() > d.shape())
        {
            return operator=(e);
        }
        else
        {
            return assign_xexpression(e);
        }
    }
    template <class D>
    template <class E, class F>
    inline auto xchunked_semantic<D>::scalar_computed_assign(const E& e, F&& f) -> derived_type&
    {
        for (auto& c : this->derived_cast().chunks())
        {
            c.scalar_computed_assign(e, f);
        }
        return this->derived_cast();
    }
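    // Note (assuming the usual xsemantic_base dispatch): scalar compound
    // assignments such as `a *= 2.0` reach scalar_computed_assign and are
    // forwarded to each chunk individually rather than to the whole container.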
    template <class D>
    template <class E>
    inline auto xchunked_semantic<D>::operator=(const xexpression<E>& e) -> derived_type&
    {
        D& d = this->derived_cast();
        get_assigner(d.chunks()).build_and_assign_temporary(e, d);
        return d;
    }
    template <class D>
    template <class CS>
    inline auto xchunked_semantic<D>::get_assigner(const CS&) const -> xchunked_assigner<temporary_type, CS>
    {
        return xchunked_assigner<temporary_type, CS>();
    }
    /**********************************
     * xchunk_iterator implementation *
     **********************************/

    template <class E>
    inline xchunk_iterator<E>::xchunk_iterator(E& expression, shape_type&& chunk_index, size_type chunk_linear_index)
        : p_chunked_expression(&expression)
        , m_chunk_index(std::move(chunk_index))
        , m_chunk_linear_index(chunk_linear_index)
        , m_slice_vector(m_chunk_index.size())
    {
        for (size_type i = 0; i < m_chunk_index.size(); ++i)
        {
            fill_slice_vector(i);
        }
    }
    template <class E>
    inline xchunk_iterator<E>& xchunk_iterator<E>::operator++()
    {
        if (m_chunk_linear_index + 1u != p_chunked_expression->grid_size())
        {
            // Advance the multi-dimensional chunk index like an odometer,
            // starting from the last dimension.
            size_type i = p_chunked_expression->dimension();
            while (i != 0)
            {
                --i;
                if (m_chunk_index[i] + 1u == p_chunked_expression->grid_shape()[i])
                {
                    m_chunk_index[i] = 0;
                    fill_slice_vector(i);
                }
                else
                {
                    m_chunk_index[i] += 1;
                    fill_slice_vector(i);
                    break;
                }
            }
        }
        m_chunk_linear_index++;
        return *this;
    }
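    // Worked example: for grid_shape {2, 3}, successive increments visit
    // {0,0} -> {0,1} -> {0,2} -> {1,0} -> {1,1} -> {1,2}; only the dimensions
    // whose index changed have their slice refreshed via fill_slice_vector.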
    template <class E>
    inline xchunk_iterator<E> xchunk_iterator<E>::operator++(int)
    {
        xchunk_iterator<E> it = *this;
        ++(*this);
        return it;
    }
    template <class E>
    inline decltype(auto) xchunk_iterator<E>::operator*() const
    {
        return base_type::get_chunk(*p_chunked_expression, m_chunk_linear_index, m_slice_vector);
    }
    template <class E>
    inline bool xchunk_iterator<E>::operator==(const xchunk_iterator& other) const
    {
        return m_chunk_linear_index == other.m_chunk_linear_index;
    }

    template <class E>
    inline bool xchunk_iterator<E>::operator!=(const xchunk_iterator& other) const
    {
        return !(*this == other);
    }
    template <class E>
    inline auto xchunk_iterator<E>::get_slice_vector() const -> const slice_vector&
    {
        return m_slice_vector;
    }

    template <class E>
    inline auto xchunk_iterator<E>::chunk_index() const -> const shape_type&
    {
        return m_chunk_index;
    }
    template <class E>
    inline auto xchunk_iterator<E>::get_chunk_slice_vector() const -> slice_vector
    {
        slice_vector slices(m_chunk_index.size());
        for (size_type i = 0; i < m_chunk_index.size(); ++i)
        {
            size_type chunk_shape = p_chunked_expression->chunk_shape()[i];
            // A trailing chunk may extend past the expression's shape; clamp
            // the slice to the part that lies inside the expression.
            size_type end = std::min(
                chunk_shape,
                p_chunked_expression->shape()[i] - m_chunk_index[i] * chunk_shape
            );
            slices[i] = range(0u, end);
        }
        return slices;
    }
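    // Worked example: with shape {10} and chunk_shape {4}, the chunk at
    // index {2} covers elements [8, 10), so end = min(4, 10 - 2 * 4) = 2 and
    // the chunk-local slice is range(0, 2) rather than the full range(0, 4).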
    template <class E>
    inline void xchunk_iterator<E>::fill_slice_vector(size_type i)
    {
        size_type range_start = m_chunk_index[i] * p_chunked_expression->chunk_shape()[i];
        size_type range_end = std::min(
            (m_chunk_index[i] + 1) * p_chunked_expression->chunk_shape()[i],
            p_chunked_expression->shape()[i]
        );
        m_slice_vector[i] = range(range_start, range_end);
    }
}

#endif