xtensor
xadapt.hpp
/***************************************************************************
 * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht          *
 * Copyright (c) QuantStack                                                 *
 *                                                                          *
 * Distributed under the terms of the BSD 3-Clause License.                 *
 *                                                                          *
 * The full license is in the file LICENSE, distributed with this software. *
 ****************************************************************************/

#ifndef XTENSOR_ADAPT_HPP
#define XTENSOR_ADAPT_HPP

#include <array>
#include <cstddef>
#include <memory>
#include <type_traits>

#include <xtl/xsequence.hpp>

#include "xarray.hpp"
#include "xbuffer_adaptor.hpp"
#include "xfixed.hpp"
#include "xtensor.hpp"

namespace xt
{
    namespace detail
    {
        template <class>
        struct array_size_impl;

        template <class T, std::size_t N>
        struct array_size_impl<std::array<T, N>>
        {
            static constexpr std::size_t value = N;
        };

        template <class C>
        using array_size = array_size_impl<std::decay_t<C>>;

        template <class P>
        struct default_allocator_for_ptr
        {
            using type = std::allocator<std::remove_const_t<std::remove_pointer_t<std::remove_reference_t<P>>>>;
        };

        template <class P>
        using default_allocator_for_ptr_t = typename default_allocator_for_ptr<P>::type;

        template <class T>
        using not_an_array = xtl::negation<is_array<T>>;

        template <class T>
        using not_a_pointer = xtl::negation<std::is_pointer<T>>;

        template <class T>
        using not_a_layout = xtl::negation<std::is_same<layout_type, T>>;
    }

#ifndef IN_DOXYGEN

    /**************************
     * xarray_adaptor builder *
     **************************/

    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class C,
        class SC,
        XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_pointer<C>)>
    inline xarray_adaptor<xtl::closure_type_t<C>, L, std::decay_t<SC>>
    adapt(C&& container, const SC& shape, layout_type l = L)
    {
        static_assert(!xtl::is_integral<SC>::value, "shape cannot be a integer");
        using return_type = xarray_adaptor<xtl::closure_type_t<C>, L, std::decay_t<SC>>;
        return return_type(std::forward<C>(container), shape, l);
    }

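    // Usage sketch (editor's illustration, not part of the original header):
    // adapting a std::vector with a dynamically sized shape container yields an
    // xarray_adaptor that shares the vector's buffer. The names `v`, `shape`
    // and `a` are hypothetical; the default layout is assumed to be row-major.
    //
    //     std::vector<double> v = {1., 2., 3., 4., 5., 6.};
    //     std::vector<std::size_t> shape = {2, 3};
    //     auto a = xt::adapt(v, shape);   // no copy, views v as a 2x3 array
    //     a(1, 2) = 42.;                  // writes through to v[5]
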
    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class C,
        class SC,
        XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, std::is_pointer<std::remove_reference_t<C>>)>
    inline auto adapt(C&& pointer, const SC& shape, layout_type l = L)
    {
        static_assert(!xtl::is_integral<SC>::value, "shape cannot be a integer");
        using buffer_type = xbuffer_adaptor<C, xt::no_ownership, detail::default_allocator_for_ptr_t<C>>;
        using return_type = xarray_adaptor<buffer_type, L, std::decay_t<SC>>;
        std::size_t size = compute_size(shape);
        return return_type(buffer_type(pointer, size), shape, l);
    }

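    // Sketch for the pointer overload above (editor's illustration): the raw
    // pointer is wrapped in a non-owning xbuffer_adaptor, so the caller remains
    // responsible for keeping the memory alive. `data` is a hypothetical name.
    //
    //     double data[6] = {1., 2., 3., 4., 5., 6.};
    //     std::vector<std::size_t> shape = {2, 3};
    //     auto a = xt::adapt(&data[0], shape);   // non-owning xarray_adaptor
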
    template <
        class C,
        class SC,
        class SS,
        XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
    inline xarray_adaptor<xtl::closure_type_t<C>, layout_type::dynamic, std::decay_t<SC>>
    adapt(C&& container, SC&& shape, SS&& strides)
    {
        static_assert(!xtl::is_integral<std::decay_t<SC>>::value, "shape cannot be a integer");
        using return_type = xarray_adaptor<xtl::closure_type_t<C>, layout_type::dynamic, std::decay_t<SC>>;
        return return_type(
            std::forward<C>(container),
            xtl::forward_sequence<typename return_type::inner_shape_type, SC>(shape),
            xtl::forward_sequence<typename return_type::inner_strides_type, SS>(strides)
        );
    }

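    // Sketch (editor's illustration): passing explicit strides selects the
    // dynamic layout. Strides are counted in elements, so {1, 2} views the same
    // six values as a 2x3 column-major array; a(0, 1) reads v[0*1 + 1*2] == v[2].
    //
    //     std::vector<double> v = {1., 2., 3., 4., 5., 6.};
    //     std::vector<std::size_t> shape = {2, 3};
    //     std::vector<std::size_t> strides = {1, 2};
    //     auto a = xt::adapt(v, shape, strides);
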
    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class P,
        class O,
        class SC,
        class A = detail::default_allocator_for_ptr_t<P>,
        XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>)>
    inline xarray_adaptor<xbuffer_adaptor<xtl::closure_type_t<P>, O, A>, L, SC> adapt(
        P&& pointer,
        typename A::size_type size,
        O ownership,
        const SC& shape,
        layout_type l = L,
        const A& alloc = A()
    )
    {
        static_assert(!xtl::is_integral<SC>::value, "shape cannot be a integer");
        (void) ownership;
        using buffer_type = xbuffer_adaptor<xtl::closure_type_t<P>, O, A>;
        using return_type = xarray_adaptor<buffer_type, L, SC>;
        buffer_type buf(std::forward<P>(pointer), size, alloc);
        return return_type(std::move(buf), shape, l);
    }

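    // Sketch (editor's illustration): the ownership tag decides whether the
    // adaptor releases the memory when it is destroyed. With xt::no_ownership
    // the caller keeps control of the buffer; xt::acquire_ownership would hand
    // it over instead. `buf` is a hypothetical name.
    //
    //     double* buf = new double[8]();
    //     std::vector<std::size_t> shape = {2, 4};
    //     auto a = xt::adapt(buf, 8, xt::no_ownership(), shape);
    //     // ... use a ...
    //     delete[] buf;   // still the caller's job here
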
    template <
        class P,
        class O,
        class SC,
        class SS,
        class A = detail::default_allocator_for_ptr_t<P>,
        XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
    inline xarray_adaptor<xbuffer_adaptor<xtl::closure_type_t<P>, O, A>, layout_type::dynamic, std::decay_t<SC>>
    adapt(P&& pointer, typename A::size_type size, O ownership, SC&& shape, SS&& strides, const A& alloc = A())
    {
        static_assert(!xtl::is_integral<std::decay_t<SC>>::value, "shape cannot be a integer");
        (void) ownership;
        using buffer_type = xbuffer_adaptor<xtl::closure_type_t<P>, O, A>;
        using return_type = xarray_adaptor<buffer_type, layout_type::dynamic, std::decay_t<SC>>;
        buffer_type buf(std::forward<P>(pointer), size, alloc);
        return return_type(
            std::move(buf),
            xtl::forward_sequence<typename return_type::inner_shape_type, SC>(shape),
            xtl::forward_sequence<typename return_type::inner_strides_type, SS>(strides)
        );
    }

    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class T,
        std::size_t N,
        class SC,
        XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>)>
    inline auto adapt(T (&c_array)[N], const SC& shape, layout_type l = L)
    {
        return adapt(&c_array[0], N, xt::no_ownership(), shape, l);
    }

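    // Sketch (editor's illustration): a 1-D C array adapted with a runtime
    // shape; the overload above forwards to the pointer/size form with
    // xt::no_ownership, so the array is never copied.
    //
    //     double carr[4] = {1., 2., 3., 4.};
    //     std::vector<std::size_t> shape = {2, 2};
    //     auto a = xt::adapt(carr, shape);   // 2x2 view over carr
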
    template <
        class T,
        std::size_t N,
        class SC,
        class SS,
        XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
    inline auto adapt(T (&c_array)[N], SC&& shape, SS&& strides)
    {
        return adapt(&c_array[0], N, xt::no_ownership(), std::forward<SC>(shape), std::forward<SS>(strides));
    }

    /***************************
     * xtensor_adaptor builder *
     ***************************/

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class C>
    inline xtensor_adaptor<C, 1, L> adapt(C&& container, layout_type l = L)
    {
        const std::array<typename std::decay_t<C>::size_type, 1> shape{container.size()};
        using return_type = xtensor_adaptor<xtl::closure_type_t<C>, 1, L>;
        return return_type(std::forward<C>(container), shape, l);
    }

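    // Sketch (editor's illustration): with no shape argument a container is
    // adapted as a 1-D xtensor_adaptor whose length is container.size().
    //
    //     std::vector<int> v = {1, 2, 3};
    //     auto a = xt::adapt(v);   // 1-D tensor of shape {3}, sharing v's storage
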
    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class C,
        class SC,
        XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, detail::not_a_pointer<C>)>
    inline xtensor_adaptor<C, detail::array_size<SC>::value, L>
    adapt(C&& container, const SC& shape, layout_type l = L)
    {
        static_assert(!xtl::is_integral<SC>::value, "shape cannot be a integer");
        constexpr std::size_t N = detail::array_size<SC>::value;
        using return_type = xtensor_adaptor<xtl::closure_type_t<C>, N, L>;
        return return_type(std::forward<C>(container), shape, l);
    }

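    // Sketch (editor's illustration): a std::array shape fixes the rank at
    // compile time, so the result is a rank-2 xtensor_adaptor rather than an
    // xarray_adaptor with a runtime dimension.
    //
    //     std::vector<double> v = {1., 2., 3., 4., 5., 6.};
    //     std::array<std::size_t, 2> shape = {2, 3};
    //     auto a = xt::adapt(v, shape);   // xtensor_adaptor with static rank 2
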
    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class C,
        class SC,
        XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, std::is_pointer<std::remove_reference_t<C>>)>
    inline auto adapt(C&& pointer, const SC& shape, layout_type l = L)
    {
        static_assert(!xtl::is_integral<SC>::value, "shape cannot be a integer");
        using buffer_type = xbuffer_adaptor<C, xt::no_ownership, detail::default_allocator_for_ptr_t<C>>;
        constexpr std::size_t N = detail::array_size<SC>::value;
        using return_type = xtensor_adaptor<buffer_type, N, L>;
        return return_type(buffer_type(pointer, compute_size(shape)), shape, l);
    }

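    // Sketch (editor's illustration): the same static-rank idea for a raw
    // pointer; ownership stays with the caller. `data` is a hypothetical name.
    //
    //     float data[6] = {};
    //     std::array<std::size_t, 2> shape = {3, 2};
    //     auto a = xt::adapt(&data[0], shape);   // non-owning, rank-2 adaptor
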
    template <
        class C,
        class SC,
        class SS,
        XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
    inline xtensor_adaptor<C, detail::array_size<SC>::value, layout_type::dynamic>
    adapt(C&& container, SC&& shape, SS&& strides)
    {
        static_assert(!xtl::is_integral<std::decay_t<SC>>::value, "shape cannot be a integer");
        constexpr std::size_t N = detail::array_size<SC>::value;
        using return_type = xtensor_adaptor<xtl::closure_type_t<C>, N, layout_type::dynamic>;
        return return_type(
            std::forward<C>(container),
            xtl::forward_sequence<typename return_type::inner_shape_type, SC>(shape),
            xtl::forward_sequence<typename return_type::inner_strides_type, SS>(strides)
        );
    }

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class P, class O, class A = detail::default_allocator_for_ptr_t<P>>
    inline xtensor_adaptor<xbuffer_adaptor<xtl::closure_type_t<P>, O, A>, 1, L>
    adapt(P&& pointer, typename A::size_type size, O ownership, layout_type l = L, const A& alloc = A())
    {
        (void) ownership;
        using buffer_type = xbuffer_adaptor<xtl::closure_type_t<P>, O, A>;
        using return_type = xtensor_adaptor<buffer_type, 1, L>;
        buffer_type buf(std::forward<P>(pointer), size, alloc);
        const std::array<typename A::size_type, 1> shape{size};
        return return_type(std::move(buf), shape, l);
    }

    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class P,
        class O,
        class SC,
        class A = detail::default_allocator_for_ptr_t<P>,
        XTL_REQUIRES(detail::is_array<std::decay_t<SC>>)>
    inline xtensor_adaptor<xbuffer_adaptor<xtl::closure_type_t<P>, O, A>, detail::array_size<SC>::value, L>
    adapt(
        P&& pointer,
        typename A::size_type size,
        O ownership,
        const SC& shape,
        layout_type l = L,
        const A& alloc = A()
    )
    {
        static_assert(!xtl::is_integral<SC>::value, "shape cannot be a integer");
        (void) ownership;
        using buffer_type = xbuffer_adaptor<xtl::closure_type_t<P>, O, A>;
        constexpr std::size_t N = detail::array_size<SC>::value;
        using return_type = xtensor_adaptor<buffer_type, N, L>;
        buffer_type buf(std::forward<P>(pointer), size, alloc);
        return return_type(std::move(buf), shape, l);
    }

    template <
        class P,
        class O,
        class SC,
        class SS,
        class A = detail::default_allocator_for_ptr_t<P>,
        XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
    inline xtensor_adaptor<xbuffer_adaptor<xtl::closure_type_t<P>, O, A>, detail::array_size<SC>::value, layout_type::dynamic>
    adapt(P&& pointer, typename A::size_type size, O ownership, SC&& shape, SS&& strides, const A& alloc = A())
    {
        static_assert(!xtl::is_integral<std::decay_t<SC>>::value, "shape cannot be a integer");
        (void) ownership;
        using buffer_type = xbuffer_adaptor<xtl::closure_type_t<P>, O, A>;
        constexpr std::size_t N = detail::array_size<SC>::value;
        using return_type = xtensor_adaptor<buffer_type, N, layout_type::dynamic>;
        buffer_type buf(std::forward<P>(pointer), size, alloc);
        return return_type(
            std::move(buf),
            xtl::forward_sequence<typename return_type::inner_shape_type, SC>(shape),
            xtl::forward_sequence<typename return_type::inner_strides_type, SS>(strides)
        );
    }

    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class T,
        std::size_t N,
        class SC,
        XTL_REQUIRES(detail::is_array<std::decay_t<SC>>)>
    inline auto adapt(T (&c_array)[N], const SC& shape, layout_type l = L)
    {
        return adapt(&c_array[0], N, xt::no_ownership(), shape, l);
    }

    template <
        class T,
        std::size_t N,
        class SC,
        class SS,
        XTL_REQUIRES(detail::is_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<SS>>)>
    inline auto adapt(T (&c_array)[N], SC&& shape, SS&& strides)
    {
        return adapt(&c_array[0], N, xt::no_ownership(), std::forward<SC>(shape), std::forward<SS>(strides));
    }

    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class C,
        std::size_t... X,
        XTL_REQUIRES(std::is_pointer<std::remove_reference_t<C>>)>
    inline auto adapt(C&& pointer, const fixed_shape<X...>& /*shape*/)
    {
        using buffer_type = xbuffer_adaptor<C, xt::no_ownership, detail::default_allocator_for_ptr_t<C>>;
        using return_type = xfixed_adaptor<buffer_type, fixed_shape<X...>, L>;
        return return_type(buffer_type(pointer, detail::fixed_compute_size<fixed_shape<X...>>::value));
    }

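    // Sketch (editor's illustration): a compile-time shape yields an
    // xfixed_adaptor; xt::xshape is assumed here as the usual alias for
    // fixed_shape.
    //
    //     double data[6] = {};
    //     auto a = xt::adapt(&data[0], xt::xshape<2, 3>());   // fixed 2x3 view
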
    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class C, class T, std::size_t N>
    inline auto adapt(C&& ptr, const T (&shape)[N])
    {
        using shape_type = std::array<std::size_t, N>;
        return adapt(std::forward<C>(ptr), xtl::forward_sequence<shape_type, decltype(shape)>(shape));
    }

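    // Sketch (editor's illustration): the overload above lets the shape be
    // written as a braced list; it is converted to a std::array, so the result
    // again has a static rank. Deduction from the braced list assumes C++17.
    //
    //     double data[6] = {};
    //     auto a = xt::adapt(&data[0], {2, 3});   // rank-2, non-owning adaptor
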
#else // IN_DOXYGEN

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class C, class SC>
    inline auto adapt(C&& container, const SC& shape, layout_type l = L);

    template <class C, class SC, class SS>
    inline auto adapt(C&& container, SC&& shape, SS&& strides);

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class P, class O, class SC, class A = detail::default_allocator_for_ptr_t<P>>
    inline auto adapt(
        P&& pointer,
        typename A::size_type size,
        O ownership,
        const SC& shape,
        layout_type l = L,
        const A& alloc = A()
    );

    template <class P, class O, class SC, class SS, class A = detail::default_allocator_for_ptr_t<P>>
    inline auto
    adapt(P&& pointer, typename A::size_type size, O ownership, SC&& shape, SS&& strides, const A& alloc = A());

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class T, std::size_t N, class SC>
    inline auto adapt(T (&c_array)[N], const SC& shape, layout_type l = L);

    template <class T, std::size_t N, class SC, class SS>
    inline auto adapt(T (&c_array)[N], SC&& shape, SS&& strides);

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class C, std::size_t... X>
    inline auto adapt(C&& pointer, const fixed_shape<X...>& /*shape*/);

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class C>
    inline auto adapt(C&& container, layout_type l = L);

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class P, class O, class A = detail::default_allocator_for_ptr_t<P>>
    inline auto
    adapt(P&& pointer, typename A::size_type size, O ownership, layout_type l = L, const A& alloc = A());

#endif // IN_DOXYGEN

    /*****************************
     * smart_ptr adapter builder *
     *****************************/

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class P, class SC, XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>)>
    auto adapt_smart_ptr(P&& smart_ptr, const SC& shape, layout_type l = L)
    {
        using buffer_adaptor = xbuffer_adaptor<decltype(smart_ptr.get()), smart_ownership, std::decay_t<P>>;
        return xarray_adaptor<buffer_adaptor, L>(
            buffer_adaptor(smart_ptr.get(), compute_size(shape), std::forward<P>(smart_ptr)),
            shape,
            l
        );
    }

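    // Sketch (editor's illustration): adapting a shared_ptr keeps the memory
    // alive for as long as the adaptor exists, because the smart pointer is
    // stored inside the buffer adaptor. `sptr` is a hypothetical name.
    //
    //     std::shared_ptr<double> sptr(new double[8](), std::default_delete<double[]>());
    //     std::vector<std::size_t> shape = {2, 4};
    //     auto a = xt::adapt_smart_ptr(sptr, shape);   // shares ownership with sptr
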
    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class P,
        class SC,
        class D,
        XTL_REQUIRES(detail::not_an_array<std::decay_t<SC>>, detail::not_a_layout<std::decay_t<D>>)>
    auto adapt_smart_ptr(P&& data_ptr, const SC& shape, D&& smart_ptr, layout_type l = L)
    {
        using buffer_adaptor = xbuffer_adaptor<P, smart_ownership, std::decay_t<D>>;

        return xarray_adaptor<buffer_adaptor, L>(
            buffer_adaptor(data_ptr, compute_size(shape), std::forward<D>(smart_ptr)),
            shape,
            l
        );
    }

    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class P, class I, std::size_t N>
    auto adapt_smart_ptr(P&& smart_ptr, const I (&shape)[N], layout_type l = L)
    {
        using buffer_adaptor = xbuffer_adaptor<decltype(smart_ptr.get()), smart_ownership, std::decay_t<P>>;
        std::array<std::size_t, N> fshape = xtl::forward_sequence<std::array<std::size_t, N>, decltype(shape)>(
            shape
        );
        return xtensor_adaptor<buffer_adaptor, N, L>(
            buffer_adaptor(smart_ptr.get(), compute_size(fshape), std::forward<P>(smart_ptr)),
            std::move(fshape),
            l
        );
    }

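    // Sketch (editor's illustration): moving a unique_ptr into adapt_smart_ptr
    // transfers ownership of the block to the adaptor; the braced shape picks
    // the overload above and fixes the rank. `uptr` is a hypothetical name.
    //
    //     std::unique_ptr<double[]> uptr(new double[6]());
    //     auto a = xt::adapt_smart_ptr(std::move(uptr), {2, 3});
    //     // the memory is released when `a` is destroyed
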
    template <
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class P,
        class I,
        std::size_t N,
        class D,
        XTL_REQUIRES(detail::not_a_layout<std::decay_t<D>>)>
    auto adapt_smart_ptr(P&& data_ptr, const I (&shape)[N], D&& smart_ptr, layout_type l = L)
    {
        using buffer_adaptor = xbuffer_adaptor<P, smart_ownership, std::decay_t<D>>;
        std::array<std::size_t, N> fshape = xtl::forward_sequence<std::array<std::size_t, N>, decltype(shape)>(
            shape
        );

        return xtensor_adaptor<buffer_adaptor, N, L>(
            buffer_adaptor(data_ptr, compute_size(fshape), std::forward<D>(smart_ptr)),
            std::move(fshape),
            l
        );
    }

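    // Sketch (editor's illustration): the data pointer and the owning smart
    // pointer can differ, e.g. when adapting a member of an object kept alive
    // by a shared_ptr. `Owner` and `owner` are hypothetical names.
    //
    //     struct Owner { std::vector<double> buf = std::vector<double>(8); };
    //     auto owner = std::make_shared<Owner>();
    //     auto a = xt::adapt_smart_ptr(owner->buf.data(), {2, 4}, owner);
    //     // the adaptor holds a copy of `owner`, keeping the buffer alive
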
    template <class T, std::size_t N, layout_type L = XTENSOR_DEFAULT_LAYOUT>
    using xtensor_pointer = xtensor_adaptor<
        xbuffer_adaptor<xtl::closure_type_t<T*>, xt::no_ownership, detail::default_allocator_for_ptr_t<T>>,
        N,
        L>;

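    // Editor's note: the alias above gives a convenient name for the non-owning
    // pointer adaptor, e.g. for fields or interfaces. `image_view` is a
    // hypothetical name used only for illustration.
    //
    //     using image_view = xt::xtensor_pointer<float, 3>;   // 3-D view over a float*
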
    template <
        class T,
        layout_type L = XTENSOR_DEFAULT_LAYOUT,
        class SC = XTENSOR_DEFAULT_SHAPE_CONTAINER(T, std::allocator<std::size_t>, std::allocator<std::size_t>)>
    using xarray_pointer = xarray_adaptor<
        xbuffer_adaptor<xtl::closure_type_t<T*>, xt::no_ownership, detail::default_allocator_for_ptr_t<T>>,
        L,
        SC>;
}

#endif