10#ifndef XTENSOR_SORT_HPP
11#define XTENSOR_SORT_HPP
18#include <xtl/xcompare.hpp>
20#include "../containers/xadapt.hpp"
21#include "../containers/xarray.hpp"
22#include "../containers/xtensor.hpp"
23#include "../core/xeval.hpp"
24#include "../core/xmath.hpp"
25#include "../core/xtensor_config.hpp"
26#include "../core/xtensor_forward.hpp"
27#include "../misc/xmanipulation.hpp"
28#include "../views/xindex_view.hpp"
29#include "../views/xslice.hpp"
30#include "../views/xview.hpp"
// Return `stride`, or the axis extent `shape` cast to ptrdiff_t when the
// stride is zero.  A zero stride arises for broadcast/singleton axes;
// substituting the extent keeps the pointer arithmetic in
// call_over_leading_axis well defined (it never advances by 0).
//
// NOTE(review): the template header and braces of this unit were lost in
// the extracted listing (T is used but never declared); reconstructed
// here so the definition is well formed.
template <class T>
inline std::ptrdiff_t adjust_secondary_stride(std::ptrdiff_t stride, T shape)
{
    return stride != 0 ? stride : static_cast<std::ptrdiff_t>(shape);
}
// Stride between consecutive 1-D lanes when iterating along the leading
// axis.  Two return statements appear with no branch between them: a
// row-major vs column-major layout check was presumably elided from this
// listing — TODO confirm against the full source.
inline std::ptrdiff_t get_secondary_stride(
const E& ev)
// Row-major case: stride of the next-to-last dimension, falling back to
// the last extent when the stride is zero (see adjust_secondary_stride).
return adjust_secondary_stride(ev.strides()[ev.dimension() - 2], *(ev.shape().end() - 1));
// Column-major case: stride of dimension 1, falling back to the first
// extent.
return adjust_secondary_stride(ev.strides()[1], *(ev.shape().begin()));
// Number of 1-D lanes to visit when processing along the leading axis:
// the product of all extents except the lane's own.  Two accumulate
// calls appear with no visible branch — presumably a layout check
// selects one; the first call's arguments are truncated in this listing.
inline std::size_t leading_axis_n_iters(
const E& ev)
return std::accumulate(
return std::accumulate(ev.shape().begin() + 1, ev.shape().end(), std::size_t(1), std::multiplies<>());
// Apply `fct(lane_begin, lane_end)` to every contiguous 1-D lane of `ev`
// along its (layout-dependent) leading axis.
template <
class E,
class F>
inline void call_over_leading_axis(E& ev, F&& fct)
// Requires at least a matrix; 1-D inputs are handled by callers.
XTENSOR_ASSERT(ev.dimension() >= 2);
const std::size_t n_iters = leading_axis_n_iters(ev);
const std::ptrdiff_t secondary_stride = get_secondary_stride(ev);
// Raw-pointer walk: lanes are `secondary_stride` elements long and laid
// out back to back in memory.
const auto begin = ev.data();
const auto end = begin + n_iters * secondary_stride;
for (
auto iter = begin; iter != end; iter += secondary_stride)
fct(iter, iter + secondary_stride);
// Two-expression variant: walk e1 and e2 in lock-step and invoke
// fct(lane1_begin, lane1_end, lane2_begin, lane2_end) per lane.  Used by
// argsort/argpartition, where one buffer holds values and the other
// receives indices.
template <
class E1,
class E2,
class F>
inline void call_over_leading_axis(E1& e1, E2& e2, F&& fct)
XTENSOR_ASSERT(e1.dimension() >= 2);
XTENSOR_ASSERT(e1.dimension() == e2.dimension());
const std::size_t n_iters = leading_axis_n_iters(e1);
const std::ptrdiff_t secondary_stride1 = get_secondary_stride(e1);
const std::ptrdiff_t secondary_stride2 = get_secondary_stride(e2);
// Lanes must have the same length in both expressions.
XTENSOR_ASSERT(secondary_stride1 == secondary_stride2);
const auto begin1 = e1.data();
const auto end1 = begin1 + n_iters * secondary_stride1;
const auto begin2 = e2.data();
const auto end2 = begin2 + n_iters * secondary_stride2;
// NOTE(review): the declarations of iter1/iter2 (initialized from
// begin1/begin2) appear to have been elided from this listing.
for (; (iter1 != end1) && (iter2 != end2); iter1 += secondary_stride1, iter2 += secondary_stride2)
fct(iter1, iter1 + secondary_stride1, iter2, iter2 + secondary_stride2);
// Index of the axis whose lanes are contiguous in memory.  The visible
// branch returns the last axis (row-major); a column-major branch
// (returning 0) is presumably elided, and unsupported layouts throw.
inline std::size_t leading_axis(
const E& e)
return e.dimension() - 1;
XTENSOR_THROW(std::runtime_error,
"Layout not supported.");
// Build the axes permutation that moves `ax` to the leading position for
// `layout`, together with its inverse so results can be transposed back.
// The layout branches choosing push_back (ax becomes last) vs
// insert-at-front (ax becomes first) are elided in this listing.
inline std::pair<dynamic_shape<std::size_t>, dynamic_shape<std::size_t>>
get_permutations(std::size_t dim, std::size_t ax,
layout_type layout)
// Start from the identity permutation, then remove `ax`...
dynamic_shape<std::size_t> permutation(dim);
std::iota(permutation.begin(), permutation.end(), std::size_t(0));
permutation.erase(permutation.begin() + std::ptrdiff_t(ax));
// ...and re-insert it at the layout-appropriate end.
permutation.push_back(ax);
permutation.insert(permutation.begin(), ax);
// Inverse permutation: the position of each original axis inside
// `permutation`.
dynamic_shape<std::size_t> reverse_permutation;
for (std::size_t i = 0; i < dim; ++i)
auto it = std::find(permutation.begin(), permutation.end(), i);
reverse_permutation.push_back(std::size_t(std::distance(permutation.begin(), it)));
return std::make_pair(std::move(permutation), std::move(reverse_permutation));
// Evaluate `e` into a container R and apply `lambda(begin, end)` to each
// lane along `axis`, transposing so the axis leads when necessary.  The
// declarations of the intermediate result `res` are elided in this
// listing.
template <
class R,
class E,
class F>
inline R map_axis(
const E& e, std::ptrdiff_t axis, F&& lambda)
// 1-D input: apply the lambda once over the whole buffer.
if (e.dimension() == 1)
lambda(res.begin(), res.end());
const std::size_t ax = normalize_axis(e.dimension(), axis);
// Fast path: axis is already the in-memory leading axis.
if (ax == detail::leading_axis(e))
detail::call_over_leading_axis(res, std::forward<F>(lambda));
// Slow path: transpose the axis to the front, process, transpose back.
dynamic_shape<std::size_t> permutation, reverse_permutation;
std::tie(permutation, reverse_permutation) = get_permutations(e.dimension(), ax, e.layout());
detail::call_over_leading_axis(res, std::forward<F>(lambda));
res =
transpose(res, reverse_permutation);
// Result-type metafunction for flat (axis-less) sorts: maps any input to
// a rank-1 container.  Specializations for xtensor (static rank) and
// xtensor_fixed (static shape) follow; some `type` aliases are elided in
// this listing.
struct flatten_sort_result_type_impl
template <
class VT, std::
size_t N, layout_type L>
struct flatten_sort_result_type_impl<
xtensor<VT, N, L>>
template <
class VT,
class S, layout_type L>
struct flatten_sort_result_type_impl<
xtensor_fixed<VT, S, L>>
using type = xtensor_fixed<VT, xshape<fixed_compute_size<S>::value>, L>;
struct flatten_sort_result_type : flatten_sort_result_type_impl<common_tensor_type_t<VT>>
using flatten_sort_result_type_t =
typename flatten_sort_result_type<VT>::type;
// Copy the whole expression into a freshly sized 1-D container `ev`
// (declaration elided in this listing) and sort it with std::sort.
template <
class E,
class R = flatten_sort_result_type_t<E>>
inline auto flat_sort_impl(
const xexpression<E>& e)
const auto& de = e.derived_cast();
ev.resize({
static_cast<typename R::shape_type::value_type
>(de.size())});
std::copy(de.cbegin(), de.cend(), ev.begin());
std::sort(ev.begin(), ev.end());
// Body of the public flat `sort` overload (signature elided) —
// delegates to flat_sort_impl.
return detail::flat_sort_impl(e);
// Evaluated (container) type used for axis-wise sorting: the
// expression's temporary_type in general, and a dynamic xtensor of the
// same rank for xtensor_fixed inputs (a fixed-shape container cannot
// hold a transposed intermediate of a different shape).
struct sort_eval_type
using type =
typename T::temporary_type;
template <
class T, std::size_t... I,
layout_type L>
struct sort_eval_type<
xtensor_fixed<T, fixed_shape<I...>, L>>
using type =
xtensor<T,
sizeof...(I), L>;
// Body of the public axis-wise `sort` overload (signature elided in
// this listing): delegates to map_axis with std::sort per lane.
using eval_type =
typename detail::sort_eval_type<E>::type;
return detail::map_axis<eval_type>(
[](
auto begin,
auto end)
std::sort(begin, end);
// argsort_iter: fill [idx_begin, idx_end) with 0..n-1 and sort those
// indices so that data[idx[k]] is ordered under `comp`.  Two sort call
// sites are visible with identical index-comparators; presumably
// `method` (stable vs quick) selects std::stable_sort vs std::sort —
// TODO confirm against the full source.
template <
class ConstRandomIt,
class RandomIt,
class Compare,
class Method>
inline void argsort_iter(
ConstRandomIt data_begin,
ConstRandomIt data_end,
// Preconditions: non-negative data range; index range of equal length.
XTENSOR_ASSERT(std::distance(data_begin, data_end) >= 0);
XTENSOR_ASSERT(std::distance(idx_begin, idx_end) == std::distance(data_begin, data_end));
std::iota(idx_begin, idx_end, 0);
// Indices are compared through the data they refer to.
[&](
const auto i,
const auto j)
return comp(*(data_begin + i), *(data_begin + j));
[&](
const auto i,
const auto j)
return comp(*(data_begin + i), *(data_begin + j));
// Convenience overload: argsort with operator< as comparison (the
// lambda body is elided in this listing).
template <
class ConstRandomIt,
class RandomIt,
class Method>
argsort_iter(ConstRandomIt data_begin, ConstRandomIt data_end, RandomIt idx_begin, RandomIt idx_end, Method method)
std::move(data_begin),
std::move(idx_begin),
[](
const auto& x,
const auto& y) ->
bool
// rebind_value_type<VT, T>: rebuild container type T with value type VT
// (used to derive index-result containers from value containers).  The
// primary template and some specializations have their `type` aliases
// elided in this listing.
template <
class VT,
class T>
struct rebind_value_type
template <
class VT,
class EC, layout_type L>
struct rebind_value_type<VT,
xarray<EC, L>>
template <
class VT,
class EC, std::
size_t N, layout_type L>
struct rebind_value_type<VT,
xtensor<EC, N, L>>
template <
class VT,
class ET,
class S, layout_type L>
// flatten_rebind_value_type: like rebind_value_type but the result is
// flattened to rank 1 for static-rank / static-shape containers.
template <
class VT,
class T>
struct flatten_rebind_value_type
using type =
typename rebind_value_type<VT, T>::type;
template <
class VT,
class EC, std::
size_t N, layout_type L>
struct flatten_rebind_value_type<VT,
xtensor<EC, N, L>>
template <
class VT,
class ET,
class S, layout_type L>
struct flatten_rebind_value_type<VT,
xtensor_fixed<ET, S, L>>
using type = xtensor_fixed<VT, xshape<fixed_compute_size<S>::value>, L>;
// Result type of axis-wise argsort: the input's temporary type rebound
// to its size_type (indices).
struct argsort_result_type
using type =
typename rebind_value_type<typename T::temporary_type::size_type, typename T::temporary_type>::type;
// Flat variant used by the axis-less argsort overloads.
struct linear_argsort_result_type
using type =
typename flatten_rebind_value_type<
typename T::temporary_type::size_type,
typename T::temporary_type>::type;
// Flat argsort: iterate the input in row-major order and produce a 1-D
// container of indices sorted by the corresponding values.
template <class E, class R = typename detail::linear_argsort_result_type<E>::type,
class Method>
inline auto flatten_argsort_impl(const xexpression<E>& e, Method method)
const auto& de = e.derived_cast();
auto cit = de.template begin<layout_type::row_major>();
using const_iterator =
decltype(cit);
// Adaptor providing random access over the row-major traversal.
auto ad = xiterator_adaptor<const_iterator, const_iterator>(cit, cit, de.size());
using result_type = R;
result.resize({de.size()});
detail::argsort_iter(de.cbegin(), de.cend(), result.begin(), result.end(), method);
// Body of the public flat `argsort` overload (signature elided) —
// delegates to flatten_argsort_impl.
return detail::flatten_argsort_impl(e, method);
// Axis-wise argsort (public overload; signature elided in this
// listing): sorts indices lane by lane along `axis`.
using eval_type =
typename detail::sort_eval_type<E>::type;
using result_type =
typename detail::argsort_result_type<eval_type>::type;
std::size_t ax = normalize_axis(de.dimension(), axis);
// 1-D input degenerates to the flat implementation.
if (de.dimension() == 1)
return detail::flatten_argsort_impl<E, result_type>(e, method);
// Per-lane worker: res receives indices, ev supplies values.
const auto argsort = [&method](
auto res_begin,
auto res_end,
auto ev_begin,
auto ev_end)
detail::argsort_iter(ev_begin, ev_end, res_begin, res_end, method);
// Fast path: the requested axis is already the leading axis.
if (ax == detail::leading_axis(de))
result_type res = result_type::from_shape(de.shape());
detail::call_over_leading_axis(res, de, argsort);
// Otherwise transpose the axis to the front, argsort, transpose back.
dynamic_shape<std::size_t> permutation, reverse_permutation;
std::tie(permutation, reverse_permutation) = detail::get_permutations(de.dimension(), ax, de.layout());
eval_type ev =
transpose(de, permutation);
result_type res = result_type::from_shape(ev.shape());
detail::call_over_leading_axis(res, ev, argsort);
res =
transpose(res, reverse_permutation);
// Repeated std::nth_element over a list of kth positions: afterwards
// every kth position holds the element it would hold in a fully sorted
// range, with no-larger elements before it and no-smaller after.
// Callers pass the kth values via rbegin/rend of an ascending-sorted
// container, i.e. largest first, so each pass can shrink the upper
// bound `k_last` of the sub-range still to be partitioned.
template <
class RandomIt,
class Iter,
class Compare>
partition_iter(RandomIt data_begin, RandomIt data_end, Iter kth_begin, Iter kth_end, Compare comp)
XTENSOR_ASSERT(std::distance(data_begin, data_end) >= 0);
XTENSOR_ASSERT(std::distance(kth_begin, kth_end) >= 0);
using idx_type =
typename std::iterator_traits<Iter>::value_type;
// Upper bound of the range still to partition; starts at full length.
idx_type k_last =
static_cast<idx_type
>(std::distance(data_begin, data_end));
for (; kth_begin != kth_end; ++kth_begin)
std::nth_element(data_begin, data_begin + *kth_begin, data_begin + k_last, std::move(comp));
// NOTE(review): k_last is presumably updated to *kth_begin inside the
// loop (that line appears elided in this listing).  Also note
// std::move(comp) re-moves the comparator each iteration — harmless
// for stateless comparators but worth confirming.
// Convenience overload using operator< as comparison (the lambda body
// is elided in this listing).
template <
class RandomIt,
class Iter>
inline void partition_iter(RandomIt data_begin, RandomIt data_end, Iter kth_begin, Iter kth_end)
return partition_iter(
std::move(data_begin),
std::move(kth_begin),
[](
const auto& x,
const auto& y) ->
bool
// Public flat `partition` overloads.  `kth_container` holds the indices
// that must land at their fully-sorted positions.  The container
// overload's signature line is partially elided in this listing.
template <
class E, xtl::non_
integral_concept C,
class R = detail::flatten_sort_result_type_t<E>>
R ev = R::from_shape({de.size()});
// kth values sorted ascending; partition_iter consumes them in reverse
// (largest first).
std::sort(kth_container.begin(), kth_container.end());
std::copy(de.linear_cbegin(), de.linear_cend(), ev.linear_begin());
detail::partition_iter(ev.linear_begin(), ev.linear_end(), kth_container.rbegin(), kth_container.rend());
// C-array kth overload: forward as std::array.
template <
class E,
class I, std::
size_t N,
class R = detail::flatten_sort_result_type_t<E>>
inline R
partition(
const xexpression<E>& e,
const I (&kth_container)[N], placeholders::xtuph tag)
xtl::forward_sequence<std::array<std::size_t, N>,
decltype(kth_container)>(kth_container),
// Single-kth overload.
template <
class E,
class R = detail::flatten_sort_result_type_t<E>>
return partition(e, std::array<std::size_t, 1>({kth}), tag);
// Axis-wise partition: partition each lane along `axis` via map_axis.
template <
class E, xtl::non_
integral_concept C>
using eval_type =
typename detail::sort_eval_type<E>::type;
std::sort(kth_container.begin(), kth_container.end());
return detail::map_axis<eval_type>(
[&kth_container](
auto begin,
auto end)
detail::partition_iter(begin, end, kth_container.rbegin(), kth_container.rend());
// C-array and single-kth axis overloads (signatures partially elided).
template <
class E,
class T, std::
size_t N>
xtl::forward_sequence<std::array<std::size_t, N>,
decltype(kth_container)>(kth_container),
return partition(e, std::array<std::size_t, 1>({kth}), axis);
// Flat argpartition: indices of the partitioned order of the whole
// (flattened) expression.  Signature partially elided in this listing.
xtl::non_integral_concept C,
class R =
typename detail::linear_argsort_result_type<typename detail::sort_eval_type<E>::type>::type>
using eval_type =
typename detail::sort_eval_type<E>::type;
using result_type =
typename detail::linear_argsort_result_type<eval_type>::type;
result_type res = result_type::from_shape({de.size()});
std::sort(kth_container.begin(), kth_container.end());
// Partition the index array, comparing indices through the data.
std::iota(res.linear_begin(), res.linear_end(), 0);
detail::partition_iter(
kth_container.rbegin(),
kth_container.rend(),
[&de](std::size_t a, std::size_t b)
return de[a] < de[b];
// C-array kth overload: forward as std::array.
template <
class E,
class I, std::
size_t N>
inline auto argpartition(
const xexpression<E>& e,
const I (&kth_container)[N], placeholders::xtuph tag)
xtl::forward_sequence<std::array<std::size_t, N>,
decltype(kth_container)>(kth_container),
// Single-kth overload.
return argpartition(e, std::array<std::size_t, 1>({kth}), tag);
// Axis-wise argpartition.
template <
class E, xtl::non_
integral_concept C>
using eval_type =
typename detail::sort_eval_type<E>::type;
using result_type =
typename detail::argsort_result_type<eval_type>::type;
const auto& de = e.derived_cast();
// 1-D input: delegates to the flat implementation (call elided in this
// listing).
if (de.dimension() == 1)
std::sort(kth_container.begin(), kth_container.end());
// Per-lane worker: fill res with 0..n-1, then partition the indices by
// comparing through the lane's values.  The lane-end parameter is
// intentionally unused.
const auto argpartition_w_kth =
[&kth_container](
auto res_begin,
auto res_end,
auto ev_begin,
auto )
std::iota(res_begin, res_end, 0);
detail::partition_iter(
kth_container.rbegin(),
kth_container.rend(),
[&ev_begin](
auto const& i,
auto const& j)
return *(ev_begin + i) < *(ev_begin + j);
const std::size_t ax = normalize_axis(de.dimension(), axis);
// Fast path: leading axis needs no transposition.
if (ax == detail::leading_axis(de))
result_type res = result_type::from_shape(de.shape());
detail::call_over_leading_axis(res, de, argpartition_w_kth);
// Otherwise transpose to the front, process, transpose back.
dynamic_shape<std::size_t> permutation, reverse_permutation;
std::tie(permutation, reverse_permutation) = detail::get_permutations(de.dimension(), ax, de.layout());
eval_type ev =
transpose(de, permutation);
result_type res = result_type::from_shape(ev.shape());
detail::call_over_leading_axis(res, ev, argpartition_w_kth);
res =
transpose(res, reverse_permutation);
// C-array and single-kth axis overloads (signatures partially elided).
template <
class E,
class I, std::
size_t N>
xtl::forward_sequence<std::array<std::size_t, N>,
decltype(kth_container)>(kth_container),
return argpartition(e, std::array<std::size_t, 1>({kth}), axis);
// Recursively enumerate every full index of `shape` whose coordinate
// along `axis` is restricted to the values in `indices`; results are
// appended to `out` in row-major order of the free dimensions.  The
// shape/indices/axis/out parameter lines are elided in this listing.
template <
class S,
class I,
class K,
class O>
inline void select_indices_impl(
std::size_t current_dim,
const K& current_index,
using id_t =
typename K::value_type;
// Interior dimension on the selected axis: recurse over the chosen
// indices only.
if ((current_dim < shape.size() - 1) && (current_dim == axis))
for (
auto i : indices)
auto idx = current_index;
idx[current_dim] = i;
select_indices_impl(shape, indices, axis, current_dim + 1, idx, out);
// Interior dimension elsewhere: recurse over the full extent.
else if ((current_dim < shape.size() - 1) && (current_dim != axis))
for (id_t i = 0; xtl::cmp_less(i, shape[current_dim]); ++i)
auto idx = current_index;
idx[current_dim] = i;
select_indices_impl(shape, indices, axis, current_dim + 1, idx, out);
// Last dimension on the selected axis: emit one index per chosen value.
else if ((current_dim == shape.size() - 1) && (current_dim == axis))
for (
auto i : indices)
auto idx = current_index;
idx[current_dim] = i;
out.push_back(std::move(idx));
// Last dimension elsewhere: emit one index per position.
else if ((current_dim == shape.size() - 1) && (current_dim != axis))
for (id_t i = 0; xtl::cmp_less(i, shape[current_dim]); ++i)
auto idx = current_index;
idx[current_dim] = i;
out.push_back(std::move(idx));
// Driver: start the recursion with a zero-initialized index sequence
// and return the collected indices (return statement elided in this
// listing).
template <
class S,
class I>
inline auto select_indices(
const S& shape,
const I& indices, std::size_t axis)
using index_type = get_strides_t<S>;
auto out = std::vector<index_type>();
select_indices_impl(shape, indices, axis, 0, xtl::make_sequence<index_type>(shape.size()), out);
// NumPy-style fancy indexing restricted to one axis: select `indices`
// along `axis` and keep every other axis whole.  Implemented as an
// index_view over all selected full indices; the call reshaping the
// flat view back to `shape` is partially elided in this listing.
template <
class E,
class I>
inline auto fancy_indexing(E&& e,
const I& indices, std::ptrdiff_t axis)
const std::size_t ax = normalize_axis(e.dimension(), axis);
using shape_t = get_strides_t<typename std::decay_t<E>::shape_type>;
// Result shape: input shape with the selected axis resized to the
// number of picked indices.
auto shape = xtl::forward_sequence<shape_t,
decltype(e.shape())>(e.shape());
shape[ax] = indices.size();
index_view(std::forward<E>(e), select_indices(e.shape(), indices, ax)),
// For each probability, the neighbouring order statistics (k, k+1) and
// interpolation weights (1-gamma, gamma) of the alpha/beta quantile
// parameterization (Hyndman & Fan, 1996).  The computations deriving
// k_kp1 and omg_g from j are partially elided in this listing.
template <
class T,
class I,
class P>
inline auto quantile_kth_gamma(std::size_t n,
const P& probas, T alpha, T beta)
const auto m = alpha + probas * (T(1) - alpha - beta);
// Continuous position of each quantile within the sorted data.
const auto p_n_m =
eval(probas *
static_cast<T
>(n) + m - 1);
const auto j =
floor(p_n_m);
return std::make_pair(
eval(k_kp1),
eval(omg_g));
// Insert a singleton dimension at `axis` into `shape` (the return
// statement is elided in this listing).
inline auto unsqueeze_shape(
const S& shape, std::size_t axis)
XTENSOR_ASSERT(axis <= shape.size());
auto new_shape = xtl::forward_sequence<xt::svector<std::size_t>,
decltype(shape)>(shape);
new_shape.insert(new_shape.begin() + axis, 1);
// Quantiles over `axis` with the alpha/beta interpolation scheme of
// Hyndman & Fan (1996): partially sort at the needed order statistics,
// gather them via fancy indexing, and blend neighbouring statistics
// with the gamma weights.
template <
class T =
double,
class E,
class P>
inline auto quantile(E&& e,
const P& probas, std::ptrdiff_t axis, T alpha, T beta)
// Probabilities and interpolation parameters must lie in [0, 1].
XTENSOR_ASSERT(
all(0. <= probas));
XTENSOR_ASSERT(
all(probas <= 1.));
XTENSOR_ASSERT(0. <= alpha);
XTENSOR_ASSERT(alpha <= 1.);
XTENSOR_ASSERT(0. <= beta);
XTENSOR_ASSERT(beta <= 1.);
using tmp_shape_t = get_strides_t<typename std::decay_t<E>::shape_type>;
using id_t =
typename tmp_shape_t::value_type;
const std::size_t ax = normalize_axis(e.dimension(), axis);
const std::size_t n = e.shape()[ax];
// Pairs of order-statistic indices (k, k+1) and their blend weights.
auto kth_gamma = detail::quantile_kth_gamma<T, id_t, P>(n, probas, alpha, beta);
// Partial sort so each needed order statistic is in place, then gather
// them along the axis.
auto e_partition =
xt::partition(std::forward<E>(e), kth_gamma.first, ax);
auto e_kth = detail::fancy_indexing(std::move(e_partition), std::move(kth_gamma.first), ax);
// Broadcast the weights against the gathered values.
auto gm1_g_shape = xtl::make_sequence<tmp_shape_t>(e.dimension(), 1);
gm1_g_shape[ax] = kth_gamma.second.size();
auto gm1_g_reshaped = reshape_view(std::move(kth_gamma.second), std::move(gm1_g_shape));
auto e_kth_g = std::move(e_kth) * std::move(gm1_g_reshaped);
// Sum each (1-gamma, gamma)-weighted pair: split the axis into a
// (2, n_probas) block and reduce over the pair dimension.
auto e_kth_g_shape = detail::unsqueeze_shape(e_kth_g.shape(), ax);
e_kth_g_shape[ax] = 2;
e_kth_g_shape[ax + 1] /= 2;
auto quantiles =
xt::sum(reshape_view(std::move(e_kth_g), std::move(e_kth_g_shape)), ax);
// C-array probas overload: wrap in an adaptor and delegate.
template <
class T =
double,
class E, std::
size_t N>
inline auto quantile(E&& e,
const T (&probas)[N], std::ptrdiff_t axis, T alpha, T beta)
return quantile(std::forward<E>(e),
adapt(probas, {N}), axis, alpha, beta);
// Axis-less overloads: quantiles of the flattened expression (bodies
// partially elided in this listing).
template <
class T =
double,
class E,
class P>
inline auto quantile(E&& e,
const P& probas, T alpha, T beta)
template <
class T =
double,
class E, std::
size_t N>
inline auto quantile(E&& e,
const T (&probas)[N], T alpha, T beta)
return quantile(std::forward<E>(e),
adapt(probas, {N}), alpha, beta);
// quantile_method-based overloads: map the method enum to its
// (alpha, beta) pair and delegate (the switch is elided in this
// listing).
template <
class T =
double,
class E,
class P>
return quantile(std::forward<E>(e), probas, axis, alpha, beta);
template <
class T =
double,
class E, std::
size_t N>
return quantile(std::forward<E>(e),
adapt(probas, {N}), axis, method);
template <
class T =
double,
class E,
class P>
template <
class T =
double,
class E, std::
size_t N>
return quantile(std::forward<E>(e),
adapt(probas, {N}), method);
// Median of the flattened expression.  Even-sized input averages the
// two middle order statistics; odd-sized takes the middle one.  The
// partition call producing `values` and the even/odd branch are
// partially elided in this listing.
inline typename std::decay_t<E>::value_type median(E&& e)
using value_type =
typename std::decay_t<E>::value_type;
// Even case: mean of order statistics szh-1 and szh.
std::size_t szh = sz / 2;
std::array<std::size_t, 2> kth = {szh - 1, szh};
return (values[kth[0]] + values[kth[1]]) / value_type(2);
// Odd case: the single middle order statistic.
std::array<std::size_t, 1> kth = {(sz - 1) / 2};
return values[kth[0]];
// Median along `axis`: partition each lane at the needed order
// statistics; the strided_view/mean lines consuming `values` are
// elided in this listing.
inline auto median(E&& e, std::ptrdiff_t axis)
std::size_t ax = normalize_axis(e.dimension(), axis);
std::size_t sz = e.shape()[ax];
// Even case.
std::size_t szh = sz / 2;
std::array<std::size_t, 2> kth = {szh - 1, szh};
auto values =
xt::partition(std::forward<E>(e), kth,
static_cast<ptrdiff_t
>(ax));
// Odd case.
std::size_t szh = (sz - 1) / 2;
std::array<std::size_t, 1> kth = {(sz - 1) / 2};
auto values =
xt::partition(std::forward<E>(e), kth,
static_cast<ptrdiff_t
>(ax));
// Maps a container type to the index container returned by
// argmin/argmax along an axis: rank shrinks by one and the value type
// becomes std::size_t.  The primary template's `type` is elided in this
// listing.
struct argfunc_result_type
template <
class T, std::
size_t N>
struct argfunc_result_type<
xtensor<T, N>>
using type =
xtensor<std::size_t, N - 1>;
// Shared implementation of argmin/argmax along an axis: `cmp` is
// std::less for argmin and std::greater for argmax.
template <layout_type L,
class E,
class F>
inline typename argfunc_result_type<E>::type arg_func_impl(
const E& e, std::size_t axis, F&& cmp)
using eval_type =
typename detail::sort_eval_type<E>::type;
using value_type =
typename E::value_type;
using result_type =
typename argfunc_result_type<E>::type;
using result_shape_type =
typename result_type::shape_type;
// 1-D fast path: plain std::min_element / std::max_element.
if (e.dimension() == 1)
auto begin = e.template begin<L>();
auto end = e.template end<L>();
if (std::is_same<F, std::less<value_type>>::value)
std::size_t i =
static_cast<std::size_t
>(std::distance(begin, std::min_element(begin, end)));
std::size_t i =
static_cast<std::size_t
>(std::distance(begin, std::max_element(begin, end)));
// Result shape: the input shape with `axis` removed.
result_shape_type alt_shape;
xt::resize_container(alt_shape, e.dimension() - 1);
std::copy(e.shape().cbegin(), e.shape().cbegin() + std::ptrdiff_t(axis), alt_shape.begin());
e.shape().cbegin() + std::ptrdiff_t(axis) + 1,
alt_shape.begin() + std::ptrdiff_t(axis)
result_type result = result_type::from_shape(std::move(alt_shape));
auto result_iter = result.template begin<L>();
// Per-lane scan recording the position of the extremal element (the
// idx/result_iter updates inside the loop are elided in this listing).
auto arg_func_lambda = [&result_iter, &cmp](
auto begin,
auto end)
std::size_t idx = 0;
value_type val = *begin;
for (std::size_t i = 1; begin != end; ++begin, ++i)
if (cmp(*begin, val))
// Transpose so `axis` leads when it is not already the leading axis.
if (axis != detail::leading_axis(e))
dynamic_shape<std::size_t> permutation, reverse_permutation;
) = detail::get_permutations(e.dimension(), axis, e.layout());
eval_type input =
transpose(e, permutation);
detail::call_over_leading_axis(input, arg_func_lambda);
// Leading axis: evaluate in place and scan directly.
auto&& input =
eval(e);
detail::call_over_leading_axis(input, arg_func_lambda);
// Flat argmin (signature elided in this listing): index, in L-order
// traversal, of the smallest element.
template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL,
class E>
using value_type =
typename E::value_type;
auto&& ed =
eval(e.derived_cast());
auto begin = ed.template begin<L>();
auto end = ed.template end<L>();
std::size_t i =
static_cast<std::size_t
>(std::distance(begin, std::min_element(begin, end)));
// Axis-wise argmin: delegates to arg_func_impl with std::less.
template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL,
class E>
using value_type =
typename E::value_type;
std::size_t ax = normalize_axis(ed.dimension(), axis);
return detail::arg_func_impl<L>(ed, ax, std::less<value_type>());
// Flat argmax: index, in L-order traversal, of the largest element.
template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL,
class E>
inline auto argmax(
const xexpression<E>& e)
using value_type =
typename E::value_type;
auto&& ed =
eval(e.derived_cast());
auto begin = ed.template begin<L>();
auto end = ed.template end<L>();
std::size_t i =
static_cast<std::size_t
>(std::distance(begin, std::max_element(begin, end)));
// Axis-wise argmax: delegates to arg_func_impl with std::greater.
template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL,
class E>
using value_type =
typename E::value_type;
std::size_t ax = normalize_axis(ed.dimension(), axis);
return detail::arg_func_impl<L>(ed, ax, std::greater<value_type>());
// unique(e) body (signature elided in this listing): sort a flat copy,
// drop adjacent duplicates with std::unique, and copy the surviving
// prefix into a fresh exactly-sized 1-D tensor.
auto sorted = sort(e, xnone());
auto end = std::unique(sorted.begin(), sorted.end());
std::size_t sz =
static_cast<std::size_t
>(std::distance(sorted.begin(), end));
using value_type =
typename E::value_type;
auto result = xtensor<value_type, 1>::from_shape({sz});
std::copy(sorted.begin(), end, result.begin());
// setdiff1d(ar1, ar2): elements of ar1 not present in ar2, computed on
// the deduplicated sorted inputs with std::set_difference.
template <
class E1,
class E2>
using value_type =
typename E1::value_type;
auto unique1 =
unique(ar1);
auto unique2 =
unique(ar2);
// tmp is sized for the worst case (nothing removed); the real size is
// wherever set_difference stopped writing.
auto tmp = xtensor<value_type, 1>::from_shape({unique1.size()});
auto end = std::set_difference(unique1.begin(), unique1.end(), unique2.begin(), unique2.end(), tmp.begin());
std::size_t sz =
static_cast<std::size_t
>(std::distance(tmp.begin(), end));
auto result = xtensor<value_type, 1>::from_shape({sz});
std::copy(tmp.begin(), end, result.begin());
Base class for xexpressions.
derived_type & derived_cast() &noexcept
Returns a reference to the actual derived type of the xexpression.
auto clip(E1 &&e1, E2 &&lo, E3 &&hi) noexcept -> detail::xfunction_type_t< math::clamp_fun, E1, E2, E3 >
Clip values between hi and lo.
auto cast(E &&e) noexcept -> detail::xfunction_type_t< typename detail::cast< R >::functor, E >
Element-wise static_cast.
auto floor(E &&e) noexcept -> detail::xfunction_type_t< math::floor_fun, E >
floor function.
auto sum(E &&e, X &&axes, EVS es=EVS())
Sum of elements over given axes.
auto mean(E &&e, X &&axes, EVS es=EVS())
Mean of elements over given axes.
auto adapt(C &&container, const SC &shape, layout_type l=L)
Constructs an adaptor of the given container, with the specified shape and layout.
auto eval(T &&t) -> std::enable_if_t< detail::is_container< std::decay_t< T > >::value, T && >
Force evaluation of xexpression.
auto flatten(E &&e)
Return a flatten view of the given expression.
auto moveaxis(E &&e, std::ptrdiff_t src, std::ptrdiff_t dest)
Return a new expression with an axis move to a new position.
auto ravel(E &&e)
Return a flatten view of the given expression.
auto transpose(E &&e) noexcept
Returns a transpose view by reversing the dimensions of xexpression e.
quantile_method
Quantile interpolation method.
auto unique(const xexpression< E > &e)
Find unique elements of a xexpression.
R partition(const xexpression< E > &e, C kth_container, placeholders::xtuph)
Partially sort xexpression.
auto quantile(E &&e, const P &probas, std::ptrdiff_t axis, T alpha, T beta)
Compute quantiles over the given axis.
auto setdiff1d(const xexpression< E1 > &ar1, const xexpression< E2 > &ar2)
Find the set difference of two xexpressions.
R argpartition(const xexpression< E > &e, C kth_container, placeholders::xtuph)
Partially sort arguments.
@ weibull
Method 6 of (Hyndman and Fan, 1996) with alpha=0 and beta=0.
@ interpolated_inverted_cdf
Method 4 of (Hyndman and Fan, 1996) with alpha=0 and beta=1.
@ linear
Method 7 of (Hyndman and Fan, 1996) with alpha=1 and beta=1.
@ normal_unbiased
Method 9 of (Hyndman and Fan, 1996) with alpha=3/8 and beta=3/8.
@ median_unbiased
Method 8 of (Hyndman and Fan, 1996) with alpha=1/3 and beta=1/3.
@ hazen
Method 5 of (Hyndman and Fan, 1996) with alpha=1/2 and beta=1/2.
standard mathematical functions for xexpressions
auto range(A start_val, B stop_val)
Select a range from start_val to stop_val (excluded).
auto all() noexcept
Returns a slice representing a full dimension, to be used as an argument of view function.
std::vector< xstrided_slice< std::ptrdiff_t > > xstrided_slice_vector
vector of slices used to build a xstrided_view
xarray_container< uvector< T, A >, L, xt::svector< typename uvector< T, A >::size_type, 4, SA, true > > xarray
Alias template on xarray_container with default parameters for data container type and shape / stride...
auto concatenate(std::tuple< CT... > &&t, std::size_t axis=0)
Concatenates xexpressions along axis.
xfixed_container< T, FSH, L, Sharable > xtensor_fixed
Alias template on xfixed_container with default parameters for layout type.
xtensor_container< uvector< T, A >, N, L > xtensor
Alias template on xtensor_container with default parameters for data container type.
auto index_view(E &&e, I &&indices) noexcept
creates an indexview from a container of indices.
sorting_method
Sorting method.
@ quick
Faster method but with no guarantee on preservation of order of equal elements https://en....
@ stable
Slower method but with guarantee on preservation of order of equal elements https://en....
auto strided_view(E &&e, S &&shape, X &&stride, std::size_t offset=0, layout_type layout=L) noexcept
Construct a strided view from an xexpression, shape, strides and offset.
auto xtuple(Types &&... args)
Creates tuples from arguments for concatenate and stack.