#ifndef XTENSOR_SORT_HPP
#define XTENSOR_SORT_HPP

#include <algorithm>
#include <array>
#include <iterator>
#include <numeric>
#include <utility>
#include <vector>

#include <xtl/xcompare.hpp>

#include "xindex_view.hpp"
#include "xmanipulation.hpp"
#include "xtensor_config.hpp"
#include "xtensor_forward.hpp"
template <class T>
std::ptrdiff_t adjust_secondary_stride(std::ptrdiff_t stride, T shape)
{
    return stride != 0 ? stride : static_cast<std::ptrdiff_t>(shape);
}

template <class E>
inline std::ptrdiff_t get_secondary_stride(const E& ev)
{
    if (ev.layout() == layout_type::row_major)
    {
        return adjust_secondary_stride(ev.strides()[ev.dimension() - 2], *(ev.shape().end() - 1));
    }
    return adjust_secondary_stride(ev.strides()[1], *(ev.shape().begin()));
}

template <class E>
inline std::size_t leading_axis_n_iters(const E& ev)
{
    if (ev.layout() == layout_type::row_major)
    {
        return std::accumulate(ev.shape().begin(), ev.shape().end() - 1, std::size_t(1), std::multiplies<>());
    }
    return std::accumulate(ev.shape().begin() + 1, ev.shape().end(), std::size_t(1), std::multiplies<>());
}
template <class E, class F>
inline void call_over_leading_axis(E& ev, F&& fct)
{
    XTENSOR_ASSERT(ev.dimension() >= 2);

    const std::size_t n_iters = leading_axis_n_iters(ev);
    const std::ptrdiff_t secondary_stride = get_secondary_stride(ev);

    const auto begin = ev.data();
    const auto end = begin + n_iters * secondary_stride;
    for (auto iter = begin; iter != end; iter += secondary_stride)
    {
        fct(iter, iter + secondary_stride);
    }
}
template <class E1, class E2, class F>
inline void call_over_leading_axis(E1& e1, E2& e2, F&& fct)
{
    XTENSOR_ASSERT(e1.dimension() >= 2);
    XTENSOR_ASSERT(e1.dimension() == e2.dimension());

    const std::size_t n_iters = leading_axis_n_iters(e1);
    const std::ptrdiff_t secondary_stride1 = get_secondary_stride(e1);
    const std::ptrdiff_t secondary_stride2 = get_secondary_stride(e2);
    XTENSOR_ASSERT(secondary_stride1 == secondary_stride2);

    const auto begin1 = e1.data();
    const auto end1 = begin1 + n_iters * secondary_stride1;
    const auto begin2 = e2.data();
    const auto end2 = begin2 + n_iters * secondary_stride2;
    auto iter1 = begin1;
    auto iter2 = begin2;
    for (; (iter1 != end1) && (iter2 != end2); iter1 += secondary_stride1, iter2 += secondary_stride2)
    {
        fct(iter1, iter1 + secondary_stride1, iter2, iter2 + secondary_stride2);
    }
}
template <class E>
inline std::size_t leading_axis(const E& e)
{
    if (e.layout() == layout_type::row_major)
    {
        return e.dimension() - 1;
    }
    else if (e.layout() == layout_type::column_major)
    {
        return 0;
    }
    XTENSOR_THROW(std::runtime_error, "Layout not supported.");
}

inline std::pair<dynamic_shape<std::size_t>, dynamic_shape<std::size_t>>
get_permutations(std::size_t dim, std::size_t ax, layout_type layout)
{
    dynamic_shape<std::size_t> permutation(dim);
    std::iota(permutation.begin(), permutation.end(), std::size_t(0));
    permutation.erase(permutation.begin() + std::ptrdiff_t(ax));
    if (layout == layout_type::row_major)
    {
        permutation.push_back(ax);
    }
    else
    {
        permutation.insert(permutation.begin(), ax);
    }

    // Build the inverse permutation used to undo the transposition afterwards
    dynamic_shape<std::size_t> reverse_permutation;
    for (std::size_t i = 0; i < dim; ++i)
    {
        auto it = std::find(permutation.begin(), permutation.end(), i);
        reverse_permutation.push_back(std::size_t(std::distance(permutation.begin(), it)));
    }
    return std::make_pair(std::move(permutation), std::move(reverse_permutation));
}
template <class R, class E, class F>
inline R map_axis(const E& e, std::ptrdiff_t axis, F&& lambda)
{
    if (e.dimension() == 1)
    {
        R res = e;
        lambda(res.begin(), res.end());
        return res;
    }

    const std::size_t ax = normalize_axis(e.dimension(), axis);
    if (ax == detail::leading_axis(e))
    {
        R res = e;
        detail::call_over_leading_axis(res, std::forward<F>(lambda));
        return res;
    }

    dynamic_shape<std::size_t> permutation, reverse_permutation;
    std::tie(permutation, reverse_permutation) = get_permutations(e.dimension(), ax, e.layout());
    R res = transpose(e, permutation);
    detail::call_over_leading_axis(res, std::forward<F>(lambda));
    res = transpose(res, reverse_permutation);
    return res;
}
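// Illustrative sketch (not part of the original header): map_axis moves `axis` to the
// leading position if needed, hands the functor one contiguous [begin, end) range per
// 1-D slice, then undoes the permutation. Sorting along an axis reduces to:
//
//     xt::xtensor<double, 2> a{{3., 1., 2.}, {6., 4., 5.}};
//     auto sorted = xt::detail::map_axis<xt::xtensor<double, 2>>(
//         a, 1, [](auto begin, auto end) { std::sort(begin, end); });
//     // sorted == {{1., 2., 3.}, {4., 5., 6.}}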
template <class VT>
struct flatten_sort_result_type_impl
{
    using type = VT;
};

template <class VT, std::size_t N, layout_type L>
struct flatten_sort_result_type_impl<xtensor<VT, N, L>>
{
    using type = xtensor<VT, 1, L>;
};

template <class VT, class S, layout_type L>
struct flatten_sort_result_type_impl<xtensor_fixed<VT, S, L>>
{
    using type = xtensor_fixed<VT, xshape<fixed_compute_size<S>::value>, L>;
};

template <class VT>
struct flatten_sort_result_type : flatten_sort_result_type_impl<common_tensor_type_t<VT>>
{
};

template <class VT>
using flatten_sort_result_type_t = typename flatten_sort_result_type<VT>::type;
template <class E, class R = flatten_sort_result_type_t<E>>
inline auto flat_sort_impl(const xexpression<E>& e)
{
    const auto& de = e.derived_cast();
    R ev;
    ev.resize({static_cast<typename R::shape_type::value_type>(de.size())});

    std::copy(de.cbegin(), de.cend(), ev.begin());
    std::sort(ev.begin(), ev.end());

    return ev;
}

template <class E>
inline auto sort(const xexpression<E>& e, placeholders::xtuph /*tag*/)
{
    return detail::flat_sort_impl(e);
}
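// Usage sketch (illustrative, not from the original header; assumes xtensor's public
// headers are included): passing xt::xnone() sorts the flattened expression.
//
//     xt::xtensor<double, 2> a{{3., 1., 2.}, {6., 4., 5.}};
//     auto flat = xt::sort(a, xt::xnone());   // 1-D result: {1., 2., 3., 4., 5., 6.}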
template <class T>
struct sort_eval_type
{
    using type = typename T::temporary_type;
};

template <class T, std::size_t... I, layout_type L>
struct sort_eval_type<xtensor_fixed<T, fixed_shape<I...>, L>>
{
    using type = xtensor<T, sizeof...(I), L>;
};
template <class E>
inline auto sort(const xexpression<E>& e, std::ptrdiff_t axis = -1)
{
    using eval_type = typename detail::sort_eval_type<E>::type;
    return detail::map_axis<eval_type>(
        e.derived_cast(),
        axis,
        [](auto begin, auto end)
        {
            std::sort(begin, end);
        }
    );
}
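// Usage sketch (illustrative): the default axis is -1, i.e. the last axis.
//
//     xt::xtensor<double, 2> a{{3., 1., 2.}, {0., 4., 5.}};
//     auto rows = xt::sort(a);      // {{1., 2., 3.}, {0., 4., 5.}}  (each row sorted)
//     auto cols = xt::sort(a, 0);   // {{0., 1., 2.}, {3., 4., 5.}}  (each column sorted)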
enum class sorting_method
{
    // Faster method, no guarantee on the preservation of the order of equal elements.
    quick,
    // Slower method, preserves the relative order of equal elements.
    stable
};

template <class ConstRandomIt, class RandomIt, class Compare, class Method>
inline void argsort_iter(
    ConstRandomIt data_begin,
    ConstRandomIt data_end,
    RandomIt idx_begin,
    RandomIt idx_end,
    Compare comp,
    Method method
)
{
    XTENSOR_ASSERT(std::distance(data_begin, data_end) >= 0);
    XTENSOR_ASSERT(std::distance(idx_begin, idx_end) == std::distance(data_begin, data_end));

    std::iota(idx_begin, idx_end, 0);

    // Compare indices through the values they point at
    const auto index_comp = [&](const auto i, const auto j)
    {
        return comp(*(data_begin + i), *(data_begin + j));
    };

    switch (method)
    {
        case sorting_method::quick:
            std::sort(idx_begin, idx_end, index_comp);
            break;
        case sorting_method::stable:
            std::stable_sort(idx_begin, idx_end, index_comp);
            break;
    }
}
template <class ConstRandomIt, class RandomIt, class Method>
inline void
argsort_iter(ConstRandomIt data_begin, ConstRandomIt data_end, RandomIt idx_begin, RandomIt idx_end, Method method)
{
    return argsort_iter(
        std::move(data_begin),
        std::move(data_end),
        std::move(idx_begin),
        std::move(idx_end),
        [](const auto& x, const auto& y) -> bool
        {
            return x < y;
        },
        method
    );
}
template <class VT, class T>
struct rebind_value_type
{
    using type = xarray<VT, xt::layout_type::dynamic>;
};

template <class VT, class EC, layout_type L>
struct rebind_value_type<VT, xarray<EC, L>>
{
    using type = xarray<VT, L>;
};

template <class VT, class EC, std::size_t N, layout_type L>
struct rebind_value_type<VT, xtensor<EC, N, L>>
{
    using type = xtensor<VT, N, L>;
};

template <class VT, class ET, class S, layout_type L>
struct rebind_value_type<VT, xtensor_fixed<ET, S, L>>
{
    using type = xtensor_fixed<VT, S, L>;
};

template <class VT, class T>
struct flatten_rebind_value_type
{
    using type = typename rebind_value_type<VT, T>::type;
};

template <class VT, class EC, std::size_t N, layout_type L>
struct flatten_rebind_value_type<VT, xtensor<EC, N, L>>
{
    using type = xtensor<VT, 1, L>;
};

template <class VT, class ET, class S, layout_type L>
struct flatten_rebind_value_type<VT, xtensor_fixed<ET, S, L>>
{
    using type = xtensor_fixed<VT, xshape<fixed_compute_size<S>::value>, L>;
};

template <class T>
struct argsort_result_type
{
    using type = typename rebind_value_type<typename T::temporary_type::size_type, typename T::temporary_type>::type;
};

template <class T>
struct linear_argsort_result_type
{
    using type = typename flatten_rebind_value_type<
        typename T::temporary_type::size_type,
        typename T::temporary_type>::type;
};
template <class E, class R = typename detail::linear_argsort_result_type<E>::type, class Method>
inline auto flatten_argsort_impl(const xexpression<E>& e, Method method)
{
    const auto& de = e.derived_cast();

    auto cit = de.template begin<layout_type::row_major>();
    using const_iterator = decltype(cit);
    auto ad = xiterator_adaptor<const_iterator, const_iterator>(cit, cit, de.size());

    using result_type = R;
    result_type result;
    result.resize({de.size()});

    detail::argsort_iter(de.cbegin(), de.cend(), result.begin(), result.end(), method);

    return result;
}

template <class E>
inline auto argsort(const xexpression<E>& e, placeholders::xtuph /*tag*/, sorting_method method = sorting_method::quick)
{
    return detail::flatten_argsort_impl(e, method);
}
template <class E>
inline auto argsort(const xexpression<E>& e, std::ptrdiff_t axis = -1, sorting_method method = sorting_method::quick)
{
    using eval_type = typename detail::sort_eval_type<E>::type;
    using result_type = typename detail::argsort_result_type<eval_type>::type;

    const auto& de = e.derived_cast();

    std::size_t ax = normalize_axis(de.dimension(), axis);

    if (de.dimension() == 1)
    {
        return detail::flatten_argsort_impl<E, result_type>(e, method);
    }

    const auto argsort = [&method](auto res_begin, auto res_end, auto ev_begin, auto ev_end)
    {
        detail::argsort_iter(ev_begin, ev_end, res_begin, res_end, method);
    };

    if (ax == detail::leading_axis(de))
    {
        result_type res = result_type::from_shape(de.shape());
        detail::call_over_leading_axis(res, de, argsort);
        return res;
    }

    dynamic_shape<std::size_t> permutation, reverse_permutation;
    std::tie(permutation, reverse_permutation) = detail::get_permutations(de.dimension(), ax, de.layout());
    eval_type ev = transpose(de, permutation);
    result_type res = result_type::from_shape(ev.shape());
    detail::call_over_leading_axis(res, ev, argsort);
    res = transpose(res, reverse_permutation);
    return res;
}
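// Usage sketch (illustrative): argsort returns the indices that would sort the
// expression, per slice along the chosen axis.
//
//     xt::xtensor<double, 1> v{5., 1., 4., 2., 3.};
//     auto idx = xt::argsort(v);                            // {1, 3, 4, 2, 0}
//     xt::xtensor<double, 2> a{{3., 1., 2.}, {6., 4., 5.}};
//     auto rows = xt::argsort(a, 1);                        // {{1, 2, 0}, {1, 2, 0}}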
template <class RandomIt, class Iter, class Compare>
inline void
partition_iter(RandomIt data_begin, RandomIt data_end, Iter kth_begin, Iter kth_end, Compare comp)
{
    XTENSOR_ASSERT(std::distance(data_begin, data_end) >= 0);
    XTENSOR_ASSERT(std::distance(kth_begin, kth_end) >= 0);

    using idx_type = typename std::iterator_traits<Iter>::value_type;

    // kth indices are visited from largest to smallest, shrinking the right bound each time
    idx_type k_last = static_cast<idx_type>(std::distance(data_begin, data_end));
    for (; kth_begin != kth_end; ++kth_begin)
    {
        std::nth_element(data_begin, data_begin + *kth_begin, data_begin + k_last, std::move(comp));
        k_last = *kth_begin;
    }
}

template <class RandomIt, class Iter>
inline void partition_iter(RandomIt data_begin, RandomIt data_end, Iter kth_begin, Iter kth_end)
{
    return partition_iter(
        std::move(data_begin),
        std::move(data_end),
        std::move(kth_begin),
        std::move(kth_end),
        [](const auto& x, const auto& y) -> bool
        {
            return x < y;
        }
    );
}
template <
    class E,
    class C,
    class R = detail::flatten_sort_result_type_t<E>,
    class = std::enable_if_t<!xtl::is_integral<C>::value, int>>
inline R partition(const xexpression<E>& e, C kth_container, placeholders::xtuph /*tag*/)
{
    const auto& de = e.derived_cast();

    R ev = R::from_shape({de.size()});
    std::sort(kth_container.begin(), kth_container.end());

    std::copy(de.linear_cbegin(), de.linear_cend(), ev.linear_begin());

    detail::partition_iter(ev.linear_begin(), ev.linear_end(), kth_container.rbegin(), kth_container.rend());

    return ev;
}
template <class E, class I, std::size_t N, class R = detail::flatten_sort_result_type_t<E>>
inline R partition(const xexpression<E>& e, const I (&kth_container)[N], placeholders::xtuph tag)
{
    return partition(
        e,
        xtl::forward_sequence<std::array<std::size_t, N>, decltype(kth_container)>(kth_container),
        tag
    );
}

template <class E, class R = detail::flatten_sort_result_type_t<E>>
inline R partition(const xexpression<E>& e, std::size_t kth, placeholders::xtuph tag)
{
    return partition(e, std::array<std::size_t, 1>({kth}), tag);
}
template <class E, class C, class = std::enable_if_t<!xtl::is_integral<C>::value, int>>
inline auto partition(const xexpression<E>& e, C kth_container, std::ptrdiff_t axis = -1)
{
    using eval_type = typename detail::sort_eval_type<E>::type;

    std::sort(kth_container.begin(), kth_container.end());

    return detail::map_axis<eval_type>(
        e.derived_cast(),
        axis,
        [&kth_container](auto begin, auto end)
        {
            detail::partition_iter(begin, end, kth_container.rbegin(), kth_container.rend());
        }
    );
}

template <class E, class T, std::size_t N>
inline auto partition(const xexpression<E>& e, const T (&kth_container)[N], std::ptrdiff_t axis = -1)
{
    return partition(
        e,
        xtl::forward_sequence<std::array<std::size_t, N>, decltype(kth_container)>(kth_container),
        axis
    );
}

template <class E>
inline auto partition(const xexpression<E>& e, std::size_t kth, std::ptrdiff_t axis = -1)
{
    return partition(e, std::array<std::size_t, 1>({kth}), axis);
}
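// Usage sketch (illustrative): after partitioning around kth = 2, the element at index 2
// is the one a full sort would put there; smaller elements land before it and larger
// ones after it, in unspecified order.
//
//     xt::xtensor<double, 1> v{5., 1., 4., 2., 3.};
//     auto p = xt::partition(v, 2);   // p[2] == 3., {p[0], p[1]} is a permutation of {1., 2.}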
template <
    class E,
    class C,
    class R = typename detail::linear_argsort_result_type<typename detail::sort_eval_type<E>::type>::type,
    class = std::enable_if_t<!xtl::is_integral<C>::value, int>>
inline R argpartition(const xexpression<E>& e, C kth_container, placeholders::xtuph)
{
    using eval_type = typename detail::sort_eval_type<E>::type;
    using result_type = typename detail::linear_argsort_result_type<eval_type>::type;

    const auto& de = e.derived_cast();

    result_type res = result_type::from_shape({de.size()});

    std::sort(kth_container.begin(), kth_container.end());

    std::iota(res.linear_begin(), res.linear_end(), 0);

    detail::partition_iter(
        res.linear_begin(),
        res.linear_end(),
        kth_container.rbegin(),
        kth_container.rend(),
        [&de](std::size_t a, std::size_t b)
        {
            return de[a] < de[b];
        }
    );

    return res;
}
template <class E, class I, std::size_t N>
inline auto argpartition(const xexpression<E>& e, const I (&kth_container)[N], placeholders::xtuph tag)
{
    return argpartition(
        e,
        xtl::forward_sequence<std::array<std::size_t, N>, decltype(kth_container)>(kth_container),
        tag
    );
}

template <class E>
inline auto argpartition(const xexpression<E>& e, std::size_t kth, placeholders::xtuph tag)
{
    return argpartition(e, std::array<std::size_t, 1>({kth}), tag);
}
template <class E, class C, class = std::enable_if_t<!xtl::is_integral<C>::value, int>>
inline auto argpartition(const xexpression<E>& e, C kth_container, std::ptrdiff_t axis = -1)
{
    using eval_type = typename detail::sort_eval_type<E>::type;
    using result_type = typename detail::argsort_result_type<eval_type>::type;

    const auto& de = e.derived_cast();

    if (de.dimension() == 1)
    {
        return argpartition<E, C, result_type>(e, std::forward<C>(kth_container), xnone());
    }

    std::sort(kth_container.begin(), kth_container.end());
    const auto argpartition_w_kth =
        [&kth_container](auto res_begin, auto res_end, auto ev_begin, auto /*ev_end*/)
    {
        std::iota(res_begin, res_end, 0);
        detail::partition_iter(
            res_begin,
            res_end,
            kth_container.rbegin(),
            kth_container.rend(),
            [&ev_begin](auto const& i, auto const& j)
            {
                return *(ev_begin + i) < *(ev_begin + j);
            }
        );
    };

    const std::size_t ax = normalize_axis(de.dimension(), axis);
    if (ax == detail::leading_axis(de))
    {
        result_type res = result_type::from_shape(de.shape());
        detail::call_over_leading_axis(res, de, argpartition_w_kth);
        return res;
    }

    dynamic_shape<std::size_t> permutation, reverse_permutation;
    std::tie(permutation, reverse_permutation) = detail::get_permutations(de.dimension(), ax, de.layout());
    eval_type ev = transpose(de, permutation);
    result_type res = result_type::from_shape(ev.shape());
    detail::call_over_leading_axis(res, ev, argpartition_w_kth);
    res = transpose(res, reverse_permutation);
    return res;
}
template <class E, class I, std::size_t N>
inline auto argpartition(const xexpression<E>& e, const I (&kth_container)[N], std::ptrdiff_t axis = -1)
{
    return argpartition(
        e,
        xtl::forward_sequence<std::array<std::size_t, N>, decltype(kth_container)>(kth_container),
        axis
    );
}

template <class E>
inline auto argpartition(const xexpression<E>& e, std::size_t kth, std::ptrdiff_t axis = -1)
{
    return argpartition(e, std::array<std::size_t, 1>({kth}), axis);
}
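// Usage sketch (illustrative): like partition, but returns indices into the original
// expression instead of values.
//
//     xt::xtensor<double, 1> v{5., 1., 4., 2., 3.};
//     auto idx = xt::argpartition(v, 2);   // idx[2] == 4, since v[4] == 3. is the 3rd smallest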
template <class S, class I, class K, class O>
inline void select_indices_impl(
    const S& shape,
    const I& indices,
    std::size_t axis,
    std::size_t current_dim,
    const K& current_index,
    O& out
)
{
    using id_t = typename K::value_type;
    if ((current_dim < shape.size() - 1) && (current_dim == axis))
    {
        for (auto i : indices)
        {
            auto idx = current_index;
            idx[current_dim] = i;
            select_indices_impl(shape, indices, axis, current_dim + 1, idx, out);
        }
    }
    else if ((current_dim < shape.size() - 1) && (current_dim != axis))
    {
        for (id_t i = 0; xtl::cmp_less(i, shape[current_dim]); ++i)
        {
            auto idx = current_index;
            idx[current_dim] = i;
            select_indices_impl(shape, indices, axis, current_dim + 1, idx, out);
        }
    }
    else if ((current_dim == shape.size() - 1) && (current_dim == axis))
    {
        for (auto i : indices)
        {
            auto idx = current_index;
            idx[current_dim] = i;
            out.push_back(std::move(idx));
        }
    }
    else if ((current_dim == shape.size() - 1) && (current_dim != axis))
    {
        for (id_t i = 0; xtl::cmp_less(i, shape[current_dim]); ++i)
        {
            auto idx = current_index;
            idx[current_dim] = i;
            out.push_back(std::move(idx));
        }
    }
}
template <class S, class I>
inline auto select_indices(const S& shape, const I& indices, std::size_t axis)
{
    using index_type = get_strides_t<S>;
    auto out = std::vector<index_type>();
    select_indices_impl(shape, indices, axis, 0, xtl::make_sequence<index_type>(shape.size()), out);
    return out;
}

template <class E, class I>
inline auto fancy_indexing(E&& e, const I& indices, std::ptrdiff_t axis)
{
    const std::size_t ax = normalize_axis(e.dimension(), axis);
    using shape_t = get_strides_t<typename std::decay_t<E>::shape_type>;
    auto shape = xtl::forward_sequence<shape_t, decltype(e.shape())>(e.shape());
    shape[ax] = indices.size();
    return reshape_view(
        index_view(std::forward<E>(e), select_indices(e.shape(), indices, ax)),
        std::move(shape)
    );
}
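// Illustrative sketch (not from the original header): select_indices enumerates full
// index tuples in which the chosen axis runs over `indices` and every other axis is
// enumerated completely; fancy_indexing then feeds them to index_view so that the
// result behaves roughly like NumPy's a[:, indices] for axis == 1.
//
//     // shape {2, 3}, indices {2, 0}, axis 1 yields the tuples
//     // {0, 2}, {0, 0}, {1, 2}, {1, 0}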
template <class T, class I, class P>
inline auto quantile_kth_gamma(std::size_t n, const P& probas, T alpha, T beta)
{
    const auto m = alpha + probas * (T(1) - alpha - beta);
    const auto p_n_m = eval(probas * static_cast<T>(n) + m - 1);
    const auto j = floor(p_n_m);

    return std::make_pair(eval(k_kp1), eval(omg_g));
}
template <class S>
inline auto unsqueeze_shape(const S& shape, std::size_t axis)
{
    XTENSOR_ASSERT(axis <= shape.size());
    auto new_shape = xtl::forward_sequence<xt::svector<std::size_t>, decltype(shape)>(shape);
    new_shape.insert(new_shape.begin() + axis, 1);
    return new_shape;
}
template <class T = double, class E, class P>
inline auto quantile(E&& e, const P& probas, std::ptrdiff_t axis, T alpha, T beta)
{
    XTENSOR_ASSERT(0. <= alpha);
    XTENSOR_ASSERT(alpha <= 1.);
    XTENSOR_ASSERT(0. <= beta);
    XTENSOR_ASSERT(beta <= 1.);

    using id_t = typename tmp_shape_t::value_type;

    const std::size_t ax = normalize_axis(e.dimension(), axis);
    const std::size_t n = e.shape()[ax];

    auto gm1_g_shape = xtl::make_sequence<tmp_shape_t>(e.dimension(), 1);
template <class T = double, class E, std::size_t N>
inline auto quantile(E&& e, const T (&probas)[N], std::ptrdiff_t axis, T alpha, T beta)
{
    return quantile(std::forward<E>(e), adapt(probas, {N}), axis, alpha, beta);
}
template <class T = double, class E, class P>
inline auto quantile(E&& e, const P& probas, T alpha, T beta);

template <class T = double, class E, std::size_t N>
inline auto quantile(E&& e, const T (&probas)[N], T alpha, T beta)
{
    return quantile(std::forward<E>(e), adapt(probas, {N}), alpha, beta);
}

// Quantile interpolation method, following Hyndman and Fan (1996); the enumerator
// values mirror the method numbers.
enum class quantile_method
{
    // Method 4, alpha = 0 and beta = 1.
    interpolated_inverted_cdf = 4,
    // Method 5, alpha = 1/2 and beta = 1/2.
    hazen,
    // Method 6, alpha = 0 and beta = 0.
    weibull,
    // Method 7, alpha = 1 and beta = 1.
    linear,
    // Method 8, alpha = 1/3 and beta = 1/3.
    median_unbiased,
    // Method 9, alpha = 3/8 and beta = 3/8.
    normal_unbiased
};
template <class T = double, class E, class P>
inline auto quantile(E&& e, const P& probas, std::ptrdiff_t axis, quantile_method method);

template <class T = double, class E, std::size_t N>
inline auto quantile(E&& e, const T (&probas)[N], std::ptrdiff_t axis, quantile_method method)
{
    return quantile(std::forward<E>(e), adapt(probas, {N}), axis, method);
}
template <class T = double, class E, class P>
inline auto quantile(E&& e, const P& probas, quantile_method method);

template <class T = double, class E, std::size_t N>
inline auto quantile(E&& e, const T (&probas)[N], quantile_method method)
{
    return quantile(std::forward<E>(e), adapt(probas, {N}), method);
}
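// Usage sketch (illustrative): alpha and beta select the interpolation scheme of
// Hyndman & Fan (1996); alpha = beta = 1 corresponds to the "linear" method that
// NumPy uses by default.
//
//     xt::xtensor<double, 1> v{1., 2., 3., 4.};
//     auto q = xt::quantile(v, {0.5}, 1.0, 1.0);   // {2.5}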
template <class E>
inline typename std::decay_t<E>::value_type median(E&& e)
{
    using value_type = typename std::decay_t<E>::value_type;
    auto sz = e.size();
    if (sz % 2 == 0)
    {
        std::size_t szh = sz / 2;
        std::array<std::size_t, 2> kth = {szh - 1, szh};
        auto values = xt::partition(xt::flatten(e), kth);
        return (values[kth[0]] + values[kth[1]]) / value_type(2);
    }
    std::array<std::size_t, 1> kth = {(sz - 1) / 2};
    auto values = xt::partition(xt::flatten(e), kth);
    return values[kth[0]];
}
template <class E>
inline auto median(E&& e, std::ptrdiff_t axis)
{
    std::size_t ax = normalize_axis(e.dimension(), axis);
    std::size_t sz = e.shape()[ax];
    xstrided_slice_vector sv(e.dimension(), xt::all());

    if (sz % 2 == 0)
    {
        std::size_t szh = sz / 2;
        std::array<std::size_t, 2> kth = {szh - 1, szh};
        auto values = xt::partition(std::forward<E>(e), kth, static_cast<std::ptrdiff_t>(ax));
        sv[ax] = xt::range(szh - 1, szh + 1);
        return xt::mean(xt::strided_view(std::move(values), std::move(sv)), {ax});
    }
    else
    {
        std::size_t szh = (sz - 1) / 2;
        std::array<std::size_t, 1> kth = {(sz - 1) / 2};
        auto values = xt::partition(std::forward<E>(e), kth, static_cast<std::ptrdiff_t>(ax));
        sv[ax] = xt::range(szh, szh + 1);
        return xt::mean(xt::strided_view(std::move(values), std::move(sv)), {ax});
    }
}
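// Usage sketch (illustrative): median of the flattened expression, or along an axis.
//
//     xt::xtensor<double, 1> v{3., 1., 2.};
//     double m = xt::median(v);                          // 2.
//     xt::xtensor<double, 2> a{{3., 1., 2.}, {6., 4., 5.}};
//     auto rows = xt::median(a, 1);                      // {2., 5.}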
template <class T>
struct argfunc_result_type
{
    using type = xarray<std::size_t>;
};

template <class T, std::size_t N>
struct argfunc_result_type<xtensor<T, N>>
{
    using type = xtensor<std::size_t, N - 1>;
};
template <layout_type L, class E, class F>
inline typename argfunc_result_type<E>::type arg_func_impl(const E& e, std::size_t axis, F&& cmp)
{
    using eval_type = typename detail::sort_eval_type<E>::type;
    using value_type = typename E::value_type;
    using result_type = typename argfunc_result_type<E>::type;
    using result_shape_type = typename result_type::shape_type;

    if (e.dimension() == 1)
    {
        auto begin = e.template begin<L>();
        auto end = e.template end<L>();
        if (std::is_same<F, std::less<value_type>>::value)
        {
            std::size_t i = static_cast<std::size_t>(std::distance(begin, std::min_element(begin, end)));
            return xtensor<size_t, 0>{i};
        }
        else
        {
            std::size_t i = static_cast<std::size_t>(std::distance(begin, std::max_element(begin, end)));
            return xtensor<size_t, 0>{i};
        }
    }

    // Shape of the result: the input shape with the reduced axis removed
    result_shape_type alt_shape;
    xt::resize_container(alt_shape, e.dimension() - 1);
    std::copy(e.shape().cbegin(), e.shape().cbegin() + std::ptrdiff_t(axis), alt_shape.begin());
    std::copy(
        e.shape().cbegin() + std::ptrdiff_t(axis) + 1,
        e.shape().cend(),
        alt_shape.begin() + std::ptrdiff_t(axis)
    );

    result_type result = result_type::from_shape(std::move(alt_shape));
    auto result_iter = result.template begin<L>();

    // Records, for each 1-D slice, the position of the element selected by cmp
    auto arg_func_lambda = [&result_iter, &cmp](auto begin, auto end)
    {
        std::size_t idx = 0;
        value_type val = *begin;
        ++begin;
        for (std::size_t i = 1; begin != end; ++begin, ++i)
        {
            if (cmp(*begin, val))
            {
                val = *begin;
                idx = i;
            }
        }
        *result_iter = idx;
        ++result_iter;
    };

    if (axis != detail::leading_axis(e))
    {
        dynamic_shape<std::size_t> permutation, reverse_permutation;
        std::tie(
            permutation,
            reverse_permutation
        ) = detail::get_permutations(e.dimension(), axis, e.layout());

        eval_type input = transpose(e, permutation);
        detail::call_over_leading_axis(input, arg_func_lambda);
    }
    else
    {
        auto&& input = eval(e);
        detail::call_over_leading_axis(input, arg_func_lambda);
    }

    return result;
}
template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL, class E>
inline auto argmin(const xexpression<E>& e)
{
    using value_type = typename E::value_type;
    auto&& ed = eval(e.derived_cast());
    auto begin = ed.template begin<L>();
    auto end = ed.template end<L>();
    std::size_t i = static_cast<std::size_t>(std::distance(begin, std::min_element(begin, end)));
    return xtensor<size_t, 0>{i};
}
template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL, class E>
inline auto argmin(const xexpression<E>& e, std::ptrdiff_t axis)
{
    using value_type = typename E::value_type;
    auto&& ed = eval(e.derived_cast());
    std::size_t ax = normalize_axis(ed.dimension(), axis);
    return detail::arg_func_impl<L>(ed, ax, std::less<value_type>());
}
template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL, class E>
inline auto argmax(const xexpression<E>& e)
{
    using value_type = typename E::value_type;
    auto&& ed = eval(e.derived_cast());
    auto begin = ed.template begin<L>();
    auto end = ed.template end<L>();
    std::size_t i = static_cast<std::size_t>(std::distance(begin, std::max_element(begin, end)));
    return xtensor<size_t, 0>{i};
}
template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL, class E>
inline auto argmax(const xexpression<E>& e, std::ptrdiff_t axis)
{
    using value_type = typename E::value_type;
    auto&& ed = eval(e.derived_cast());
    std::size_t ax = normalize_axis(ed.dimension(), axis);
    return detail::arg_func_impl<L>(ed, ax, std::greater<value_type>());
}
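// Usage sketch (illustrative): without an axis the flattened expression is searched
// and a 0-D index tensor is returned; with an axis one index per slice is returned.
//
//     xt::xtensor<double, 2> a{{3., 1., 2.}, {6., 4., 5.}};
//     auto flat_min = xt::argmin(a);     // 1 (a 0-D index into the flattened array)
//     auto row_max = xt::argmax(a, 1);   // {0, 0}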
template <class E>
inline auto unique(const xexpression<E>& e)
{
    auto sorted = sort(e, xnone());
    auto end = std::unique(sorted.begin(), sorted.end());
    std::size_t sz = static_cast<std::size_t>(std::distance(sorted.begin(), end));
    using value_type = typename E::value_type;
    auto result = xtensor<value_type, 1>::from_shape({sz});
    std::copy(sorted.begin(), end, result.begin());
    return result;
}
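// Usage sketch (illustrative): unique returns the sorted distinct values as a 1-D tensor.
//
//     xt::xtensor<int, 2> a{{1, 2, 2}, {3, 1, 3}};
//     auto u = xt::unique(a);   // {1, 2, 3}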
template <class E1, class E2>
inline auto setdiff1d(const xexpression<E1>& ar1, const xexpression<E2>& ar2)
{
    using value_type = typename E1::value_type;

    auto unique1 = unique(ar1);
    auto unique2 = unique(ar2);

    auto tmp = xtensor<value_type, 1>::from_shape({unique1.size()});
    auto end = std::set_difference(
        unique1.begin(), unique1.end(),
        unique2.begin(), unique2.end(),
        tmp.begin()
    );

    std::size_t sz = static_cast<std::size_t>(std::distance(tmp.begin(), end));
    auto result = xtensor<value_type, 1>::from_shape({sz});
    std::copy(tmp.begin(), end, result.begin());
    return result;
}
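// Usage sketch (illustrative): setdiff1d returns the sorted values of the first
// argument that do not appear in the second.
//
//     xt::xtensor<int, 1> a{1, 2, 3, 2, 4};
//     xt::xtensor<int, 1> b{3, 4, 5};
//     auto d = xt::setdiff1d(a, b);   // {1, 2}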