10#ifndef XTENSOR_ADAPT_HPP
11#define XTENSOR_ADAPT_HPP
18#include <xtl/xsequence.hpp>
21#include "xbuffer_adaptor.hpp"
34 struct array_size_impl;
36 template <
class T, std::
size_t N>
37 struct array_size_impl<std::array<T, N>>
39 static constexpr std::size_t value = N;
43 using array_size = array_size_impl<std::decay_t<C>>;
46 struct default_allocator_for_ptr
48 using type = std::allocator<std::remove_const_t<std::remove_pointer_t<std::remove_reference_t<P>>>>;
52 using default_allocator_for_ptr_t =
typename default_allocator_for_ptr<P>::type;
55 using not_an_array = xtl::negation<is_array<T>>;
58 using not_a_pointer = xtl::negation<std::is_pointer<T>>;
61 using not_a_layout = xtl::negation<std::is_same<layout_type, T>>;
83 XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_pointer<C>)>
87 static_assert(!xtl::is_integral<SC>::value,
"shape cannot be a integer");
89 return return_type(std::forward<C>(container), shape, l);
104 XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, std::is_pointer<std::remove_reference_t<C>>)>
107 static_assert(!xtl::is_integral<SC>::value,
"shape cannot be a integer");
110 std::size_t size = compute_size(shape);
111 return return_type(buffer_type(pointer, size), shape, l);
127 XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
131 static_assert(!xtl::is_integral<std::decay_t<SC>>::value,
"shape cannot be a integer");
134 std::forward<C>(container),
135 xtl::forward_sequence<typename return_type::inner_shape_type, SC>(shape),
136 xtl::forward_sequence<typename return_type::inner_strides_type, SS>(
strides)
// NOTE(review): fragments of two adapt() overloads that take a pointer, a
// size, an ownership tag and an allocator (defaulted from the pointer type).
// Signatures and return-type aliases were dropped by extraction.

// Overload: pointer + size + ownership + shape (+ layout, allocator).
158 class A = detail::default_allocator_for_ptr_t<P>,
159 XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>)>
162 typename A::size_type size,
169 static_assert(!xtl::is_integral<SC>::value,
"shape cannot be a integer");
// The buffer takes over (or borrows) the pointer per the ownership tag,
// then is moved into the adaptor.
173 buffer_type buf(std::forward<P>(pointer), size, alloc);
174 return return_type(std::move(buf), shape, l);
// Overload: same, but with explicit strides instead of a layout.
195 class A = detail::default_allocator_for_ptr_t<P>,
196 XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
198 adapt(P&& pointer,
typename A::size_type size, O ownership, SC&& shape, SS&&
strides,
const A& alloc = A())
200 static_assert(!xtl::is_integral<std::decay_t<SC>>::value,
"shape cannot be a integer");
204 buffer_type buf(std::forward<P>(pointer), size, alloc);
207 xtl::forward_sequence<typename return_type::inner_shape_type, SC>(shape),
208 xtl::forward_sequence<typename return_type::inner_strides_type, SS>(
strides)
// NOTE(review): fragments of two adapt() overloads for built-in C arrays
// `T (&)[N]`. Both delegate to the pointer overloads with xt::no_ownership(),
// i.e. the adaptor never frees the array.
226 XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>)>
229 return adapt(&c_array[0], N, xt::no_ownership(), shape, l);
// Strided variant (SS constrained away from layout_type).
246 XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
247 inline auto adapt(T (&c_array)[N], SC&& shape, SS&&
strides)
249 return adapt(&c_array[0], N, xt::no_ownership(), std::forward<SC>(shape), std::forward<SS>(
strides));
// NOTE(review): fragments of the std::array-shaped (fixed-rank) adapt()
// overloads. Extraction dropped the signatures and return-type aliases.

// Overload: 1-D adaptor over a container — rank-1 shape is just {size()}.
264 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class C>
267 const std::array<typename std::decay_t<C>::size_type, 1> shape{container.size()};
269 return return_type(std::forward<C>(container), shape, l);
// Overload: container + std::array shape; rank N is read at compile time.
285 XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, detail::not_a_pointer<C>)>
289 static_assert(!xtl::is_integral<SC>::value,
"shape cannot be a integer");
290 constexpr std::size_t N = detail::array_size<SC>::value;
292 return return_type(std::forward<C>(container), shape, l);
// Overload: raw pointer + std::array shape.
307 XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, std::is_pointer<std::remove_reference_t<C>>)>
310 static_assert(!xtl::is_integral<SC>::value,
"shape cannot be a integer");
312 constexpr std::size_t N = detail::array_size<SC>::value;
314 return return_type(buffer_type(pointer, compute_size(shape)), shape, l);
// Overload: container + std::array shape + explicit strides.
330 XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
334 static_assert(!xtl::is_integral<std::decay_t<SC>>::value,
"shape cannot be a integer");
335 constexpr std::size_t N = detail::array_size<SC>::value;
338 std::forward<C>(container),
339 xtl::forward_sequence<typename return_type::inner_shape_type, SC>(shape),
340 xtl::forward_sequence<typename return_type::inner_strides_type, SS>(
strides)
// NOTE(review): fragments of the owning-pointer adapt() overloads with an
// implicit 1-D shape or a std::array shape. Signatures partially dropped.

// Overload: pointer + size + ownership, no shape — a 1-D adaptor whose
// single extent is `size`.
356 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class P,
class O,
class A = detail::default_allocator_for_ptr_t<P>>
358 adapt(P&& pointer,
typename A::size_type size, O ownership,
layout_type l = L,
const A& alloc = A())
363 buffer_type buf(std::forward<P>(pointer), size, alloc);
364 const std::array<typename A::size_type, 1> shape{size};
365 return return_type(std::move(buf), shape, l);
// Overload: pointer + size + ownership + std::array shape.
386 class A = detail::default_allocator_for_ptr_t<P>,
387 XTL_REQUIRES(detail::is_array<std::decay_t<SC>>)>
391 typename A::size_type size,
398 static_assert(!xtl::is_integral<SC>::value,
"shape cannot be a integer");
401 constexpr std::size_t N = detail::array_size<SC>::value;
403 buffer_type buf(std::forward<P>(pointer), size, alloc);
404 return return_type(std::move(buf), shape, l);
// NOTE(review): fragments of the strided owning-pointer overload with a
// std::array shape, plus the fixed-rank C-array overloads.

// Overload: pointer + size + ownership + std::array shape + strides.
425 class A = detail::default_allocator_for_ptr_t<P>,
426 XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
428 adapt(P&& pointer,
typename A::size_type size, O ownership, SC&& shape, SS&&
strides,
const A& alloc = A())
430 static_assert(!xtl::is_integral<std::decay_t<SC>>::value,
"shape cannot be a integer");
433 constexpr std::size_t N = detail::array_size<SC>::value;
435 buffer_type buf(std::forward<P>(pointer), size, alloc);
438 xtl::forward_sequence<typename return_type::inner_shape_type, SC>(shape),
439 xtl::forward_sequence<typename return_type::inner_strides_type, SS>(
strides)
// C-array overloads with a std::array shape: delegate to the pointer
// overloads with no_ownership (the array is never freed by the adaptor).
457 XTL_REQUIRES(detail::is_array<std::decay_t<SC>>)>
460 return adapt(&c_array[0], N, xt::no_ownership(), shape, l);
477 XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
478 inline auto adapt(T (&c_array)[N], SC&& shape, SS&&
strides)
480 return adapt(&c_array[0], N, xt::no_ownership(), std::forward<SC>(shape), std::forward<SS>(
strides));
// NOTE(review): fragments of the compile-time fixed_shape pointer overload
// and the `const T (&shape)[N]` convenience overload.

// Overload: pointer + fixed_shape<X...>; total size is computed at
// compile time from the pack of extents.
495 XTL_REQUIRES(std::is_pointer<std::remove_reference_t<C>>)>
500 return return_type(buffer_type(pointer, detail::fixed_compute_size<
fixed_shape<X...>>::value));
// Overload: pointer + braced shape literal (C array of extents); converts
// the C array into a std::array and delegates to the array-shape overload.
503 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class C,
class T, std::
size_t N>
504 inline auto adapt(C&& ptr,
const T (&shape)[N])
506 using shape_type = std::array<std::size_t, N>;
507 return adapt(std::forward<C>(ptr), xtl::forward_sequence<shape_type,
decltype(shape)>(shape));
// NOTE(review): this region appears to be a cluster of forward declarations
// (prototypes) mirroring the adapt() overloads defined above — most bodies
// and names were dropped by extraction; only template heads and parameter
// lists remain. Left byte-identical.

// Prototype: container + shape.
525 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class C,
class SC>
// Prototype: container + shape + strides.
540 template <
class C,
class SC,
class SS>
// Prototype: pointer + size + ownership + shape.
559 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class P,
class O,
class SC,
class A = detail::default_allocator_for_ptr_t<P>>
562 typename A::size_type size,
// Prototype: pointer + size + ownership + shape + strides.
585 template <
class P,
class O,
class SC,
class SS,
class A = detail::default_allocator_for_ptr_t<P>>
587 adapt(P&& pointer,
typename A::size_type size, O ownership, SC&& shape, SS&&
strides,
const A& alloc = A());
// Prototype: C array + shape.
601 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class T, std::
size_t N,
class SC>
// Prototype: C array + shape + strides.
617 template <
class T, std::
size_t N,
class SC,
class SS>
// Prototype: fixed_shape<X...> pointer overload.
628 template <
layout_type L = XTENSOR_DEFAULT_LAYOUT,
class C, std::size_t... X>
// Prototype: 1-D container overload.
639 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class C>
// Prototype: pointer + size + ownership (1-D).
654 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class P,
class O,
class A = detail::default_allocator_for_ptr_t<P>>
656 adapt(P&& pointer,
typename A::size_type size, O ownership,
layout_type l = L,
const A& alloc = A());
// NOTE(review): fragments of the adapt_smart_ptr() overloads, which wrap a
// unique_ptr/shared_ptr-managed buffer in an adaptor that shares/extends the
// pointer's lifetime via smart_ownership. Signatures partially dropped.

// Overload: smart pointer + dynamic shape container.
686 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class P,
class SC, XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>)>
// The buffer adaptor stores both the raw data pointer and the (moved)
// smart pointer, which keeps the allocation alive.
689 using buffer_adaptor =
xbuffer_adaptor<
decltype(smart_ptr.get()), smart_ownership, std::decay_t<P>>;
691 buffer_adaptor(smart_ptr.get(), compute_size(shape), std::forward<P>(smart_ptr)),
// Overload: separate data pointer + smart pointer keeping it alive
// (D constrained away from layout_type).
751 XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<D>>)>
757 buffer_adaptor(data_ptr, compute_size(shape), std::forward<D>(smart_ptr)),
// Overload: smart pointer + C-array shape literal; the shape is converted
// to std::array<std::size_t, N> before building the adaptor.
784 template <layout_type L = XTENSOR_DEFAULT_LAYOUT,
class P,
class I, std::
size_t N>
787 using buffer_adaptor =
xbuffer_adaptor<
decltype(smart_ptr.get()), smart_ownership, std::decay_t<P>>;
788 std::array<std::size_t, N> fshape = xtl::forward_sequence<std::array<std::size_t, N>,
decltype(shape)>(
792 buffer_adaptor(smart_ptr.get(), compute_size(fshape), std::forward<P>(smart_ptr)),
// Overload: data pointer + smart pointer + C-array shape literal.
851 XTL_REQUIRES(detail::not_a_layout<std::decay_t<D>>)>
855 std::array<std::size_t, N> fshape = xtl::forward_sequence<std::array<std::size_t, N>,
decltype(shape)>(
860 buffer_adaptor(data_ptr, compute_size(fshape), std::forward<D>(smart_ptr)),
// NOTE(review): template heads of the convenience alias templates for
// pointer-backed adaptors (per the index text below: xtensor_pointer /
// xarray_pointer — TODO confirm; the alias bodies were dropped by extraction).
885 template <
class T, std::
size_t N, layout_type L = XTENSOR_DEFAULT_LAYOUT>
// SC defaults to the project-wide default shape container for T.
914 class SC = XTENSOR_DEFAULT_SHAPE_CONTAINER(T, std::allocator<std::size_t>, std::allocator<std::size_t>)>
Fixed shape implementation for compile-time defined arrays.
Dense multidimensional container adaptor with tensor semantics.
Dense multidimensional container adaptor with tensor semantics and fixed dimension.
Dense multidimensional container adaptor with tensor semantics and fixed dimension.
auto adapt_smart_ptr(P &&smart_ptr, const SC &shape, layout_type l=L)
Adapt a smart pointer (unique_ptr or shared_ptr) to a typed memory block.
xtensor_adaptor< xbuffer_adaptor< xtl::closure_type_t< T * >, xt::no_ownership, detail::default_allocator_for_ptr_t< T > >, N, L > xtensor_pointer
xtensor adaptor for a pointer.
xarray_adaptor< xbuffer_adaptor< xtl::closure_type_t< T * >, xt::no_ownership, detail::default_allocator_for_ptr_t< T > >, L, SC > xarray_pointer
xarray adaptor for a pointer.
auto adapt(C &&container, const SC &shape, layout_type l=L)
Constructs:
auto strides(const E &e, stride_type type=stride_type::normal) noexcept
Get strides of an object.
standard mathematical functions for xexpressions