1
1
mirror of https://github.com/OpenVoiceOS/OpenVoiceOS synced 2025-06-05 22:19:21 +02:00

Add pre-installed python packages within rootfs overlay

TODO: This is just a quick fix and needs to be changed to
buildroot packages in the upcoming days/weeks/months.
This commit is contained in:
j1nx
2022-12-08 21:02:30 +01:00
parent ce2443f753
commit 37d97e1551
36609 changed files with 7287696 additions and 0 deletions

View File

@ -0,0 +1,45 @@
#ifndef PYTHONIC_UTILS_ARRAY_HELPER_HPP
#define PYTHONIC_UTILS_ARRAY_HELPER_HPP
#include "pythonic/include/utils/array_helper.hpp"
#include "pythonic/types/tuple.hpp"
PYTHONIC_NS_BEGIN
/* recursively return the value at the position given by `indices' in the
* `self' "array like". It may be a sub array instead of real value.
* indices[0] is the coordinate for the first dimension && indices[M-1] is
* for the last one.
*/
// Recursive case: consume the coordinate for the current dimension
// (indices[M - L - 1]) via operator[], then delegate the remaining L
// dimensions to nget<L - 1>.  The result may be a sub-array, not a scalar.
template <size_t L>
template <class A, size_t M>
auto nget<L>::operator()(A &&self, types::array<long, M> const &indices)
// The decltype uses index 0 only to name the return type; the actual
// index used at runtime is indices[M - L - 1] below.
-> decltype(nget<L - 1>()(std::forward<A>(self)[0], indices))
{
return nget<L - 1>()(std::forward<A>(self)[indices[M - L - 1]], indices);
}
// Same recursion as operator() but indexing through the container's
// .fast() accessor instead of operator[] (presumably the bounds-unchecked
// path of the pythonic array types -- confirm in types/ndarray.hpp).
template <size_t L>
template <class A, size_t M>
auto nget<L>::fast(A &&self, types::array<long, M> const &indices)
-> decltype(nget<L - 1>().fast(std::forward<A>(self).fast(0), indices))
{
return nget<L - 1>().fast(std::forward<A>(self).fast(indices[M - L - 1]),
indices);
}
// Base case (L == 0): only the last coordinate remains; index the final
// dimension directly and return the element.
template <class A, size_t M>
auto nget<0>::operator()(A &&self, types::array<long, M> const &indices)
-> decltype(std::forward<A>(self)[indices[M - 1]])
{
return std::forward<A>(self)[indices[M - 1]];
}
// Base case of the .fast() recursion: apply the last coordinate through
// the container's fast accessor.
template <class A, size_t M>
auto nget<0>::fast(A &&self, types::array<long, M> const &indices)
-> decltype(std::forward<A>(self).fast(indices[M - 1]))
{
return std::forward<A>(self).fast(indices[M - 1]);
}
PYTHONIC_NS_END
#endif

View File

@ -0,0 +1,449 @@
#ifndef PYTHONIC_UTILS_BROADCAST_COPY_HPP
#define PYTHONIC_UTILS_BROADCAST_COPY_HPP
#include "pythonic/include/utils/broadcast_copy.hpp"
#include "pythonic/types/tuple.hpp"
PYTHONIC_NS_BEGIN
namespace utils
{
/* helper for specialization of the broadcasting, vectorizing copy operator
* due to expression templates, this may also triggers a lot of
*computations!
*
* ``vector_form'' is set to true if the operation can be done using
*Boost.SIMD
*
* the call operator has four template parameters:
*
* template <class E, class F, size_t N>
* void operator()(E &&self, F const &other, utils::int_<N>, utils::int_<M>)
*
* ``E'' is the type of the object to which the data are copied
*
* ``F'' is the type of the object from which the data are copied
*
* ``N'' is the depth of the loop nest. When it reaches ``1'', we have a raw
*loop
* that may be vectorizable
*
* ``D'' is the delta between the number of dimensions of E && F. When set
*to a
* value greater than ``0'', some broadcasting is needed
*/
// Broadcasting copy engine, specialized below on
// (vectorizer kind, remaining loop-nest depth N, broadcast delta D).
template <typename vector_form, size_t N, size_t D>
struct _broadcast_copy;
// Tag selecting the index-tuple-based scalar path used when
// utils::no_broadcast_ex(other) holds (see broadcast_copy_dispatcher below).
struct fast_novectorize {
};
// Innermost case (N == 0, D == 0): both index tuples are complete, so move
// exactly one element: load from `other`, cast to the destination dtype,
// store into `self`.
template <>
struct _broadcast_copy<fast_novectorize, 0, 0> {
// Expands the index tuples into individual (long) arguments for
// load()/store() via the index sequence Is...
template <class E, class F, class SelfIndices, class OtherIndices,
size_t... Is>
void helper(E &&self, F const &other, SelfIndices &&self_indices,
OtherIndices &&other_indices, utils::index_sequence<Is...>)
{
std::forward<E>(self)
.store((typename std::decay<E>::type::dtype)other.load(
(long)std::get<Is>(other_indices)...),
(long)std::get<Is>(self_indices)...);
}
template <class E, class F, class SelfIndices, class OtherIndices>
void operator()(E &&self, F const &other, SelfIndices &&self_indices,
OtherIndices &&other_indices)
{
// Build an index sequence as long as the self index tuple and dispatch.
helper(std::forward<E>(self), other, self_indices, other_indices,
utils::make_index_sequence<std::tuple_size<
typename std::decay<SelfIndices>::type>::value>());
}
};
// Walk one dimension (depth N): when the extents of self and other match,
// both index tuples advance together; otherwise `other` is broadcast along
// this axis by pinning its coordinate to 0.
template <size_t N>
struct _broadcast_copy<fast_novectorize, N, 0> {
template <class E, class F, class SelfIndices, class OtherIndices>
void operator()(E &&self, F const &other, SelfIndices &&self_indices,
OtherIndices &&other_indices)
{
// Axis currently processed = total rank - remaining depth N.
long const other_size =
other.template shape<std::decay<E>::type::value - N>();
long const self_size =
self.template shape<std::decay<E>::type::value - N>();
if (self_size == other_size)
for (long i = 0; i < self_size; ++i)
_broadcast_copy<fast_novectorize, N - 1, 0>{}(
std::forward<E>(self), other,
std::tuple_cat(self_indices, std::make_tuple(i)),
std::tuple_cat(other_indices, std::make_tuple(i)));
else
// Mismatched extents: read other at index 0 on this axis.
for (long i = 0; i < self_size; ++i)
_broadcast_copy<fast_novectorize, N - 1, 0>{}(
std::forward<E>(self), other,
std::tuple_cat(self_indices, std::make_tuple(i)),
std::tuple_cat(other_indices, std::make_tuple(0)));
}
};
// Rank mismatch (D > 0): wrap `other` to add one broadcast dimension --
// types::broadcast for scalars (dtype), types::broadcasted otherwise --
// and recurse with D - 1 until ranks align.
template <size_t N, size_t D>
struct _broadcast_copy<fast_novectorize, N, D> {
template <class E, class F, class SelfIndices, class OtherIndices>
void operator()(E &&self, F const &other, SelfIndices &&self_indices,
OtherIndices &&other_indices)
{
using broadcaster = typename std::conditional<
types::is_dtype<F>::value,
types::broadcast<F, typename std::decay<E>::type::dtype>,
types::broadcasted<F>>::type;
_broadcast_copy<fast_novectorize, N, D - 1>{}(
std::forward<E>(self), broadcaster(other),
std::forward<SelfIndices>(self_indices),
std::forward<OtherIndices>(other_indices));
}
};
// Same-rank iterator-based copy (any vectorizer tag, D == 0): copy `other`
// flat into `self`, then tile the copied pattern until `self` is full when
// self is the larger of the two.  The trailing Indices pack is accepted but
// unused, matching the fast_novectorize call shape.
template <size_t N, class vectorizer>
struct _broadcast_copy<vectorizer, N, 0> {
template <class E, class F, class... Indices>
void operator()(E &&self, F const &other, Indices... indices)
{
long self_size = std::distance(self.begin(), self.end()),
other_size = std::distance(other.begin(), other.end());
std::copy(other.begin(), other.end(), self.begin());
// eventually repeat the pattern
for (long i = other_size; i < self_size; i += other_size)
std::copy_n(self.begin(), other_size, self.begin() + i);
}
};
// ``D'' is not ``0'' so we should broadcast
// ``D'' is not ``0'' so we should broadcast: fill every element of `self`
// from the single logical value `other`.  Note is_dtype is a compile-time
// trait tested with an ordinary runtime `if`, so both branches must compile
// for every F.
template <class vectorizer, size_t N, size_t D>
struct _broadcast_copy {
template <class E, class F>
void operator()(E &&self, F const &other)
{
if (types::is_dtype<F>::value) {
// other is a scalar: plain fill.
std::fill(self.begin(), self.end(), other);
} else {
// other is an expression: evaluate it once into the first slot,
// then replicate that materialized value.
auto sfirst = self.begin();
*sfirst = other;
std::fill(self.begin() + 1, self.end(), *sfirst);
}
}
// Overload taking (and ignoring) the two index-tuple arguments so it can
// be called with the fast_novectorize call shape as well.
template <class E, class F, class ES, class FS>
void operator()(E &&self, F const &other, ES, FS)
{
if (types::is_dtype<F>::value) {
std::fill(self.begin(), self.end(), other);
} else {
auto sfirst = self.begin();
*sfirst = other;
std::fill(self.begin() + 1, self.end(), *sfirst);
}
}
};
#ifdef USE_XSIMD
// specialize for SIMD only if available
// otherwise use the std::copy fallback
// SIMD copy (only compiled under USE_XSIMD): copy `other` into `self` one
// xsimd batch at a time, handle the scalar tail, then tile the pattern to
// fill the rest of `self` as in the scalar path.
template <class vectorizer, class E, class F>
void vbroadcast_copy(E &&self, F const &other)
{
using T = typename F::dtype;
using vT = xsimd::batch<T>;
static const std::size_t vN = vT::size;
long self_size = std::distance(self.begin(), self.end()),
other_size = std::distance(other.begin(), other.end());
auto oiter = vectorizer::vbegin(other);
// Number of full batches available from `other`.
const long bound =
std::distance(vectorizer::vbegin(other), vectorizer::vend(other));
for (auto iter = vectorizer::vbegin(self), end = vectorizer::vend(self);
iter != end; ++iter, ++oiter) {
iter.store(*oiter);
}
// tail
{
auto siter = self.begin();
auto oiter = other.begin();
for (long i = bound * vN; i < other_size; ++i)
*(siter + i) = *(oiter + i);
}
// Repeat the copied pattern when self is longer than other.
for (long i = other_size; i < self_size; i += other_size)
std::copy_n(self.begin(), other_size, self.begin() + i);
}
// Route the innermost (N == 1, D == 0) vectorized copies to the SIMD
// implementation above; one specialization per vectorizer policy.
template <>
struct _broadcast_copy<types::vectorizer, 1, 0> {
template <class E, class F>
void operator()(E &&self, F const &other)
{
return vbroadcast_copy<types::vectorizer>(std::forward<E>(self), other);
}
};
template <>
struct _broadcast_copy<types::vectorizer_nobroadcast, 1, 0> {
template <class E, class F>
void operator()(E &&self, F const &other)
{
return vbroadcast_copy<types::vectorizer_nobroadcast>(
std::forward<E>(self), other);
}
};
#endif
// Runtime dispatcher: pick the tuple-index fast path when `other` is known
// not to broadcast, otherwise fall back to the requested iteration policy.
template <class E, class F, size_t N, size_t D, bool vector_form>
struct broadcast_copy_dispatcher;
// Non-vectorized variant.
template <class E, class F, size_t N, size_t D>
struct broadcast_copy_dispatcher<E, F, N, D, false> {
void operator()(E &self, F const &other)
{
if (utils::no_broadcast_ex(other))
_broadcast_copy<fast_novectorize, N, D>{}(
self, other, std::make_tuple(), std::make_tuple());
else
_broadcast_copy<types::novectorize, N, D>{}(self, other);
}
};
// Vectorized variant: same fast-path test, SIMD policy otherwise.
template <class E, class F, size_t N, size_t D>
struct broadcast_copy_dispatcher<E, F, N, D, true> {
void operator()(E &self, F const &other)
{
if (utils::no_broadcast_ex(other))
_broadcast_copy<fast_novectorize, N, D>{}(
self, other, std::make_tuple(), std::make_tuple());
else
_broadcast_copy<types::vectorizer, N, D>{}(self, other);
}
};
// Public entry point: copy `other` into `self` with broadcasting, using
// the dispatcher selected by the template parameters.  Returns `self` to
// allow chaining.  Empty destinations are a no-op.
template <class E, class F, size_t N, size_t D, bool vector_form>
E &broadcast_copy(E &self, F const &other)
{
  if (!self.size())
    return self;
  broadcast_copy_dispatcher<E, F, N, D, vector_form>{}(self, other);
  return self;
}
/* update
*/
// ``D'' is not ``0'' so we should broadcast
// ``D'' is not ``0'' so we should broadcast: apply Op in place between each
// element along the first axis of `self` and the single value `other`.
template <class Op, typename vector_form, size_t N, size_t D>
struct _broadcast_update {
template <class E, class F>
void operator()(E &&self, F const &other)
{
long n = self.template shape<0>();
auto siter = self.begin();
for (long i = 0; i < n; ++i)
Op{}(*(siter + i), other);
}
};
// Same-rank in-place update (D == 0): apply Op element-wise, cycling
// through `other` repeatedly when `self` is longer.
template <class Op, size_t N, class vector_form>
struct _broadcast_update<Op, vector_form, N, 0> {
template <class E, class F>
void operator()(E &&self, F const &other)
{
long other_size = std::distance(other.begin(), other.end());
auto siter = self.begin();
auto oiter = other.begin();
if (other_size == 1) {
// Single source value: hoist it out of the loop.
auto value = *oiter;
for (auto send = self.end(); siter != send; ++siter)
Op{}(*siter, value);
} else
// self's length is assumed to be a multiple of other's -- TODO confirm.
for (auto send = self.end(); siter != send;) {
auto ooiter = oiter;
for (long i = 0; i < other_size; ++i, ++siter, ++ooiter)
Op{}(*siter, *ooiter);
}
}
// broadcast<F0, F1> wraps a single value: read it once and apply to all.
template <class E, class F0, class F1>
void operator()(E &&self, types::broadcast<F0, F1> const &other)
{
auto value = *other.begin();
for (auto siter = self.begin(), send = self.end(); siter != send; ++siter)
Op{}(*siter, value);
}
template <class E, class F>
void operator()(E &&self, types::broadcasted<F> const &other)
{
// NOTE(review): dereferences end(), not begin(), unlike the overload
// above.  For types::broadcasted this presumably still yields the
// broadcast value regardless of position -- confirm against
// types/broadcasted's iterator semantics.
auto value = *other.end();
for (auto siter = self.begin(), send = self.end(); siter != send; ++siter)
Op{}(*siter, value);
}
};
// Innermost fast-path update (N == 0, D == 0): both index tuples are
// complete; load one element from `other` and combine it into `self`
// through the container's update<Op>() member.
template <class Op>
struct _broadcast_update<Op, fast_novectorize, 0, 0> {
// Expands the index tuples into individual (long) arguments.
template <class E, class F, class SelfIndices, class OtherIndices,
size_t... Is>
void helper(E &&self, F const &other, SelfIndices &&self_indices,
OtherIndices &&other_indices, utils::index_sequence<Is...>)
{
self.template update<Op>(other.load((long)std::get<Is>(other_indices)...),
(long)std::get<Is>(self_indices)...);
}
template <class E, class F, class SelfIndices, class OtherIndices>
void operator()(E &&self, F const &other, SelfIndices &&self_indices,
OtherIndices &&other_indices)
{
helper(std::forward<E>(self), other, self_indices, other_indices,
utils::make_index_sequence<std::tuple_size<
typename std::decay<SelfIndices>::type>::value>());
}
};
// Fast-path update, one dimension at depth N: advance both index tuples
// together when extents match, otherwise pin `other`'s coordinate to 0
// (broadcast along this axis).  Mirrors _broadcast_copy<fast_novectorize,N,0>.
template <class Op, size_t N>
struct _broadcast_update<Op, fast_novectorize, N, 0> {
template <class E, class F, class SelfIndices, class OtherIndices>
void operator()(E &&self, F const &other, SelfIndices &&self_indices,
OtherIndices &&other_indices)
{
auto const other_size =
other.template shape<std::decay<E>::type::value - N>();
auto const self_size =
self.template shape<std::decay<E>::type::value - N>();
if (self_size == other_size)
for (long i = 0; i < self_size; ++i)
_broadcast_update<Op, fast_novectorize, N - 1, 0>{}(
std::forward<E>(self), other,
std::tuple_cat(self_indices, std::make_tuple(i)),
std::tuple_cat(other_indices, std::make_tuple(i)));
else
for (long i = 0; i < self_size; ++i)
_broadcast_update<Op, fast_novectorize, N - 1, 0>{}(
std::forward<E>(self), other,
std::tuple_cat(self_indices, std::make_tuple(i)),
std::tuple_cat(other_indices, std::make_tuple(0)));
}
};
// Rank mismatch (D > 0): wrap `other` to add one broadcast dimension and
// recurse with D - 1, exactly as in the copy engine above.
template <class Op, size_t N, size_t D>
struct _broadcast_update<Op, fast_novectorize, N, D> {
template <class E, class F, class SelfIndices, class OtherIndices>
void operator()(E &&self, F const &other, SelfIndices &&self_indices,
OtherIndices &&other_indices)
{
using broadcaster = typename std::conditional<
types::is_dtype<F>::value,
types::broadcast<F, typename std::decay<E>::type::dtype>,
types::broadcasted<F>>::type;
_broadcast_update<Op, fast_novectorize, N, D - 1>{}(
std::forward<E>(self), broadcaster(other),
std::forward<SelfIndices>(self_indices),
std::forward<OtherIndices>(other_indices));
}
};
#ifdef USE_XSIMD
// specialize for SIMD only if available
// otherwise use the std::copy fallback
// SIMD in-place update (USE_XSIMD only): apply Op batch-by-batch, then fix
// up the scalar tail.  Unlike vbroadcast_copy there is no pattern-repeat
// loop here -- TODO confirm callers guarantee equal flat sizes.
template <class Op, class vectorizer, class E, class F>
void vbroadcast_update(E &&self, F const &other)
{
using T = typename F::dtype;
using vT = typename xsimd::batch<T>;
long other_size = std::distance(other.begin(), other.end());
static const std::size_t vN = vT::size;
auto oiter = vectorizer::vbegin(other);
auto iter = vectorizer::vbegin(self);
// Number of full batches available from `other`.
const long bound =
std::distance(vectorizer::vbegin(other), vectorizer::vend(other));
for (auto end = vectorizer::vend(self); iter != end; ++iter, ++oiter) {
iter.store(Op{}(*iter, *oiter));
}
// tail
{
auto siter = self.begin();
auto oiter = other.begin();
for (long i = bound * vN; i < other_size; ++i)
Op{}(*(siter + i), *(oiter + i));
}
}
// Scalar-source overloads: a broadcast/broadcasted wrapper carries a single
// value, so read it once and apply Op to every element of `self`.
template <class Op, class vectorizer, class E, class F0, class F1>
void vbroadcast_update(E &&self, types::broadcast<F0, F1> const &other)
{
auto value = *other.begin();
for (auto siter = self.begin(), send = self.end(); siter != send; ++siter)
Op{}(*siter, value);
}
template <class Op, class vectorizer, class E, class F>
void vbroadcast_update(E &&self, types::broadcasted<F> const &other)
{
// NOTE(review): *other.end() here vs *other.begin() above -- presumably
// equivalent for broadcasted's constant iterator; confirm.
auto value = *other.end();
for (auto siter = self.begin(), send = self.end(); siter != send; ++siter)
Op{}(*siter, value);
}
// Route innermost (N == 1, D == 0) vectorized updates to vbroadcast_update;
// one specialization per vectorizer policy.
template <class Op>
struct _broadcast_update<Op, types::vectorizer, 1, 0> {
template <class... Args>
void operator()(Args &&... args)
{
vbroadcast_update<Op, types::vectorizer>(std::forward<Args>(args)...);
}
};
template <class Op>
struct _broadcast_update<Op, types::vectorizer_nobroadcast, 1, 0> {
template <class... Args>
void operator()(Args &&... args)
{
vbroadcast_update<Op, types::vectorizer_nobroadcast>(
std::forward<Args>(args)...);
}
};
#endif
// Runtime dispatcher for in-place updates, mirroring
// broadcast_copy_dispatcher: fast tuple-index path when `other` does not
// broadcast, otherwise the requested iteration policy.
template <class Op, bool vector_form, class E, class F, size_t N, size_t D>
struct broadcast_update_dispatcher;
// Non-vectorized variant.
template <class Op, class E, class F, size_t N, size_t D>
struct broadcast_update_dispatcher<Op, false, E, F, N, D> {
void operator()(E &self, F const &other)
{
if (utils::no_broadcast_ex(other))
_broadcast_update<Op, fast_novectorize, N, D>{}(
self, other, std::make_tuple(), std::make_tuple());
else
_broadcast_update<Op, types::novectorize, N, D>{}(self, other);
}
};
// Vectorized variant.
template <class Op, class E, class F, size_t N, size_t D>
struct broadcast_update_dispatcher<Op, true, E, F, N, D> {
void operator()(E &self, F const &other)
{
if (utils::no_broadcast_ex(other))
_broadcast_update<Op, fast_novectorize, N, D>{}(
self, other, std::make_tuple(), std::make_tuple());
else
_broadcast_update<Op, types::vectorizer, N, D>{}(self, other);
}
};
// Public entry point: apply Op in place between `self` and `other` with
// broadcasting.  Returns `self` for chaining; empty destinations are a
// no-op.
template <class Op, class E, class F, size_t N, size_t D, bool vector_form>
E &broadcast_update(E &self, F const &other)
{
  if (!self.size())
    return self;
  broadcast_update_dispatcher<Op, vector_form, E, F, N, D>{}(self, other);
  return self;
}
}
PYTHONIC_NS_END
#endif

View File

@ -0,0 +1,6 @@
#ifndef PYTHONIC_UTILS_FUNCTOR_HPP
#define PYTHONIC_UTILS_FUNCTOR_HPP
#include "pythonic/include/utils/functor.hpp"
#endif

View File

@ -0,0 +1,18 @@
#ifndef PYTHONIC_UTILS_FWD_HPP
#define PYTHONIC_UTILS_FWD_HPP
#include "pythonic/include/utils/fwd.hpp"
PYTHONIC_NS_BEGIN
namespace utils
{
// No-op sink: accepts any number of arguments and discards them.  Useful
// to force evaluation of a parameter-pack expansion at the call site.
template <typename... Types>
void fwd(Types const &...)
{
}
}
PYTHONIC_NS_END
#endif

View File

@ -0,0 +1,6 @@
#ifndef PYTHONIC_UTILS_INT_HPP
#define PYTHONIC_UTILS_INT_HPP
#include "pythonic/include/utils/int_.hpp"
#endif

View File

@ -0,0 +1,50 @@
#ifndef PYTHONIC_UTILS_ITERATOR_HPP
#define PYTHONIC_UTILS_ITERATOR_HPP
#include "pythonic/include/utils/iterator.hpp"
PYTHONIC_NS_BEGIN
namespace utils
{
// Default-construct the wrapped iterator type.
template <class T>
comparable_iterator<T>::comparable_iterator()
: T()
{
}
// Wrap an existing iterator.
template <class T>
comparable_iterator<T>::comparable_iterator(T const &t)
: T(t)
{
}
// NOTE(review): `<` is implemented as `!=`, not as an ordering --
// presumably so code that loops with `it < end` works for iterator types
// that only support inequality; confirm against
// include/utils/iterator.hpp before relying on ordering semantics.
template <class T>
bool comparable_iterator<T>::operator<(comparable_iterator<T> other)
{
return (*this) != other;
}
// Store a single value (non-tuple flavor).
template <class T>
iterator_reminder<false, T>::iterator_reminder(T const &v)
: values(v)
{
}
// Store a single value (tuple flavor, one element).
template <class T>
iterator_reminder<true, T>::iterator_reminder(T const &v)
: values(v)
{
}
// Store several values in the `values` tuple.
template <class T, class... Others>
iterator_reminder<true, T, Others...>::iterator_reminder(
T const &v, Others const &... others)
: values(v, others...)
{
}
}
PYTHONIC_NS_END
#endif

View File

@ -0,0 +1,15 @@
#ifndef PYTHONIC_UTILS_META_HPP
#define PYTHONIC_UTILS_META_HPP
#include "pythonic/include/utils/meta.hpp"
// Wrapper whose instantiation makes `Types...` appear in the compiler
// diagnostic when the assertion fails, giving extra context beyond the
// message string.
template <bool C, class... Types>
struct static_assert_check {
static_assert(C, "Assertion failed <see below for more information>");
static constexpr bool value = C;
};
// Compile-time assertion; trailing arguments are types to surface in the
// error output via static_assert_check.
#define pythran_static_assert(value, str, ...)                                 \
static_assert(static_assert_check<value, __VA_ARGS__>::value, str)
#endif

View File

@ -0,0 +1,34 @@
#ifndef PYTHONIC_UTILS_NESTED_CONTAINER_HPP
#define PYTHONIC_UTILS_NESTED_CONTAINER_HPP
#include "pythonic/include/utils/nested_container.hpp"
#include <limits>
#include "pythonic/types/traits.hpp"
#include "pythonic/utils/numpy_traits.hpp"
PYTHONIC_NS_BEGIN
namespace utils
{
// Total number of scalar elements in a nested container: the size of the
// outermost level times the flat size of its first element, recursing
// until a dtype is reached (then dispatched to the bool specialization).
// NOTE(review): `Type` is presumably an alias for T declared in
// include/utils/nested_container.hpp -- confirm.
template <class T>
long nested_container_size<T>::flat_size(T const &t)
{
return t.size() *
nested_container_size<typename std::conditional<
// If we have a scalar or a complex, we want to stop
// recursion, and then dispatch to bool specialization
types::is_dtype<typename Type::value_type>::value, bool,
typename Type::value_type>::type>::flat_size(*t.begin());
}
/* Recursion stops on bool */
// A scalar counts as exactly one element; the argument is ignored.
template <class F>
constexpr long nested_container_size<bool>::flat_size(F)
{
return 1;
}
}
PYTHONIC_NS_END
#endif

View File

@ -0,0 +1,13 @@
#ifndef PYTHONIC_UTILS_NEUTRAL_HPP
#define PYTHONIC_UTILS_NEUTRAL_HPP
#include "pythonic/include/utils/neutral.hpp"
#include "pythonic/operator_/iadd.hpp"
#include "pythonic/operator_/iand.hpp"
#include "pythonic/operator_/ior.hpp"
#include "pythonic/operator_/imul.hpp"
#include "pythonic/operator_/imax.hpp"
#include "pythonic/operator_/imin.hpp"
#include "pythonic/operator_/ixor.hpp"
#endif

View File

@ -0,0 +1,32 @@
#ifndef PYTHONIC_UTILS_NUMPY_CONVERSION_HPP
#define PYTHONIC_UTILS_NUMPY_CONVERSION_HPP
#include "pythonic/include/utils/numpy_conversion.hpp"
#include "pythonic/utils/numpy_traits.hpp"
// Generates an overload of `fname` that accepts any non-ndarray array
// expression, materializes it into a concrete ndarray, and forwards the
// call.  Two spellings: MSVC (non-clang) gets plain `auto` with an
// enable_if on a defaulted template parameter; everyone else gets a
// trailing-return-type SFINAE form.
#if _MSC_VER && !__clang__
#define NUMPY_EXPR_TO_NDARRAY0_IMPL(fname)                                     \
template <class E, class... Types,                                             \
typename std::enable_if<!types::is_ndarray<E>::value &&                        \
types::is_array<E>::value,                                                     \
E>::type * = nullptr>                                                          \
auto fname(E const &expr, Types &&... others)                                  \
{                                                                              \
return fname(types::ndarray<typename E::dtype, typename E::shape_t>{expr},    \
std::forward<Types>(others)...);                                               \
}
#else
#define NUMPY_EXPR_TO_NDARRAY0_IMPL(fname)                                     \
template <class E, class... Types>                                             \
auto fname(E const &expr, Types &&... others)                                  \
->typename std::enable_if<                                                     \
!types::is_ndarray<E>::value && types::is_array<E>::value,                     \
decltype(fname(                                                                \
types::ndarray<typename E::dtype, typename E::shape_t>{expr},                  \
std::forward<Types>(others)...))>::type                                        \
{                                                                              \
return fname(types::ndarray<typename E::dtype, typename E::shape_t>{expr},    \
std::forward<Types>(others)...);                                               \
}
#endif
#endif

View File

@ -0,0 +1,6 @@
#ifndef PYTHONIC_UTILS_NUMPY_TRAITS_HPP
#define PYTHONIC_UTILS_NUMPY_TRAITS_HPP
#include "pythonic/include/utils/numpy_traits.hpp"
#endif

View File

@ -0,0 +1,690 @@
/*
pdqsort.hpp - Pattern-defeating quicksort.
Copyright (c) 2015 Orson Peters
This software is provided 'as-is', without any express or implied warranty.
In no event will the
authors be held liable for any damages arising from the use of this
software.
Permission is granted to anyone to use this software for any purpose,
including commercial
applications, and to alter it and redistribute it freely, subject to the
following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the
original software. If you use this software in a product, an
acknowledgment in the product
documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as
being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef PDQSORT_HPP
#define PDQSORT_HPP
#include <algorithm>
#include <cstddef>
#include <functional>
#include <utility>
#include <iterator>
#if __cplusplus >= 201103L
#include <cstdint>
#include <type_traits>
#define PDQSORT_PREFER_MOVE(x) std::move(x)
#else
#define PDQSORT_PREFER_MOVE(x) (x)
#endif
namespace pdqsort_detail
{
// Tuning constants for pattern-defeating quicksort.
enum {
// Partitions below this size are sorted using insertion sort.
insertion_sort_threshold = 24,
// Partitions above this size use Tukey's ninther to select the pivot.
ninther_threshold = 128,
// When we detect an already sorted partition, attempt an insertion sort
// that allows this
// amount of element moves before giving up.
partial_insertion_sort_limit = 8,
// Must be multiple of 8 due to loop unrolling, and < 256 to fit in unsigned
// char.
block_size = 64,
// Cacheline size, assumes power of two.
cacheline_size = 64
};
#if __cplusplus >= 201103L
// Trait recognizing std::less / std::greater; presumably used by the
// caller to opt into the branchless partition for trivially-comparable
// types -- confirm at the pdqsort entry point (not visible here).
template <class T>
struct is_default_compare : std::false_type {
};
template <class T>
struct is_default_compare<std::less<T>> : std::true_type {
};
template <class T>
struct is_default_compare<std::greater<T>> : std::true_type {
};
#endif
// Returns floor(log2(n)), assumes n > 0.
// Returns floor(log2(n)); n must be > 0 (undefined for n == 0).
template <class T>
inline int log2(T n)
{
  int result = 0;
  for (; n > 1; n >>= 1)
    ++result;
  return result;
}
// Sorts [begin, end) using insertion sort with the given comparison function.
// Sorts [begin, end) using insertion sort with the given comparison function.
template <class Iter, class Compare>
inline void insertion_sort(Iter begin, Iter end, Compare comp)
{
typedef typename std::iterator_traits<Iter>::value_type T;
if (begin == end)
return;
for (Iter cur = begin + 1; cur != end; ++cur) {
Iter sift = cur;
Iter sift_1 = cur - 1;
// Compare first so we can avoid 2 moves for an element already positioned
// correctly.
if (comp(*sift, *sift_1)) {
// Hold the element aside and shift larger elements right until its
// slot is found; the `sift != begin` check guards the left boundary.
T tmp = PDQSORT_PREFER_MOVE(*sift);
do {
*sift-- = PDQSORT_PREFER_MOVE(*sift_1);
} while (sift != begin && comp(tmp, *--sift_1));
*sift = PDQSORT_PREFER_MOVE(tmp);
}
}
}
// Sorts [begin, end) using insertion sort with the given comparison function.
// Assumes
// *(begin - 1) is an element smaller than or equal to any element in [begin,
// end).
// Insertion sort without the left-boundary check: the sift loop relies on
// *(begin - 1) being <= every element of [begin, end) to terminate, so it
// must only be called on non-leftmost partitions.
template <class Iter, class Compare>
inline void unguarded_insertion_sort(Iter begin, Iter end, Compare comp)
{
typedef typename std::iterator_traits<Iter>::value_type T;
if (begin == end)
return;
for (Iter cur = begin + 1; cur != end; ++cur) {
Iter sift = cur;
Iter sift_1 = cur - 1;
// Compare first so we can avoid 2 moves for an element already positioned
// correctly.
if (comp(*sift, *sift_1)) {
T tmp = PDQSORT_PREFER_MOVE(*sift);
do {
*sift-- = PDQSORT_PREFER_MOVE(*sift_1);
} while (comp(tmp, *--sift_1));
*sift = PDQSORT_PREFER_MOVE(tmp);
}
}
}
// Attempts to use insertion sort on [begin, end). Will return false if more
// than
// partial_insertion_sort_limit elements were moved, and abort sorting.
// Otherwise it will
// successfully sort and return true.
// Attempts insertion sort on [begin, end); gives up and returns false as
// soon as more than partial_insertion_sort_limit total element moves have
// been made, otherwise sorts fully and returns true.  Used to cheaply
// finish nearly-sorted partitions.
template <class Iter, class Compare>
inline bool partial_insertion_sort(Iter begin, Iter end, Compare comp)
{
typedef typename std::iterator_traits<Iter>::value_type T;
if (begin == end)
return true;
// Running count of moves performed across all sifts.
std::size_t limit = 0;
for (Iter cur = begin + 1; cur != end; ++cur) {
Iter sift = cur;
Iter sift_1 = cur - 1;
// Compare first so we can avoid 2 moves for an element already positioned
// correctly.
if (comp(*sift, *sift_1)) {
T tmp = PDQSORT_PREFER_MOVE(*sift);
do {
*sift-- = PDQSORT_PREFER_MOVE(*sift_1);
} while (sift != begin && comp(tmp, *--sift_1));
*sift = PDQSORT_PREFER_MOVE(tmp);
// cur - sift == number of positions this element was moved.
limit += cur - sift;
}
if (limit > partial_insertion_sort_limit)
return false;
}
return true;
}
// Orders the pair (*a, *b) so that *b does not compare less than *a.
template <class Iter, class Compare>
inline void sort2(Iter a, Iter b, Compare comp)
{
  const bool out_of_order = comp(*b, *a);
  if (out_of_order)
    std::iter_swap(a, b);
}
// Sorts the elements *a, *b and *c using comparison function comp.
// Three-element sorting network: after the three sort2 calls *a, *b, *c
// are in non-decreasing order (so *b holds the median).
template <class Iter, class Compare>
inline void sort3(Iter a, Iter b, Iter c, Compare comp)
{
sort2(a, b, comp);
sort2(b, c, comp);
sort2(a, b, comp);
}
// Rounds pointer p up to the next cacheline_size boundary (cacheline_size
// is assumed to be a power of two, so & -cacheline_size masks the low bits).
template <class T>
inline T *align_cacheline(T *p)
{
#if defined(UINTPTR_MAX) && __cplusplus >= 201103L
std::uintptr_t ip = reinterpret_cast<std::uintptr_t>(p);
#else
std::size_t ip = reinterpret_cast<std::size_t>(p);
#endif
ip = (ip + cacheline_size - 1) & -cacheline_size;
return reinterpret_cast<T *>(ip);
}
// Exchanges the `num` out-of-place elements recorded in offsets_l (offsets
// from `first`) and offsets_r (offsets back from `last`).
template <class Iter>
inline void swap_offsets(Iter first, Iter last, unsigned char *offsets_l,
unsigned char *offsets_r, int num, bool use_swaps)
{
typedef typename std::iterator_traits<Iter>::value_type T;
if (use_swaps) {
// This case is needed for the descending distribution, where we need
// to have proper swapping for pdqsort to remain O(n).
for (int i = 0; i < num; ++i) {
std::iter_swap(first + offsets_l[i], last - offsets_r[i]);
}
} else if (num > 0) {
// Cyclic rotation: hold the first left element aside, then chain the
// moves l0 <- r0 <- l1 <- r1 <- ... and drop tmp into the final slot.
// Saves one move per pair versus plain swaps.
Iter l = first + offsets_l[0];
Iter r = last - offsets_r[0];
T tmp(PDQSORT_PREFER_MOVE(*l));
*l = PDQSORT_PREFER_MOVE(*r);
for (int i = 1; i < num; ++i) {
l = first + offsets_l[i];
*r = PDQSORT_PREFER_MOVE(*l);
r = last - offsets_r[i];
*l = PDQSORT_PREFER_MOVE(*r);
}
*r = PDQSORT_PREFER_MOVE(tmp);
}
}
// Partitions [begin, end) around pivot *begin using comparison function comp.
// Elements equal
// to the pivot are put in the right-hand partition. Returns the position of
// the pivot after
// partitioning and whether the passed sequence already was correctly
// partitioned. Assumes the
// pivot is a median of at least 3 elements and that [begin, end) is at least
// insertion_sort_threshold long. Uses branchless partitioning.
// Branchless block partition of [begin, end) around pivot *begin; elements
// equal to the pivot go to the right partition.  Returns the pivot's final
// position and whether the input was already partitioned.  See the comment
// block above for preconditions.
template <class Iter, class Compare>
inline std::pair<Iter, bool> partition_right_branchless(Iter begin, Iter end,
Compare comp)
{
typedef typename std::iterator_traits<Iter>::value_type T;
// Move pivot into local for speed.
T pivot(PDQSORT_PREFER_MOVE(*begin));
Iter first = begin;
Iter last = end;
// Find the first element greater than or equal than the pivot (the median
// of 3 guarantees
// this exists).
while (comp(*++first, pivot))
;
// Find the first element strictly smaller than the pivot. We have to guard
// this search if
// there was no element before *first.
if (first - 1 == begin)
while (first < last && !comp(*--last, pivot))
;
else
while (!comp(*--last, pivot))
;
// If the first pair of elements that should be swapped to partition are the
// same element,
// the passed in sequence already was correctly partitioned.
bool already_partitioned = first >= last;
if (!already_partitioned) {
std::iter_swap(first, last);
++first;
}
// The following branchless partitioning is derived from "BlockQuicksort:
// How Branch
// Mispredictions dont affect Quicksort" by Stefan Edelkamp and Armin
// Weiss.
// Offset buffers are over-allocated so they can be cacheline-aligned.
unsigned char offsets_l_storage[block_size + cacheline_size];
unsigned char offsets_r_storage[block_size + cacheline_size];
unsigned char *offsets_l = align_cacheline(offsets_l_storage);
unsigned char *offsets_r = align_cacheline(offsets_r_storage);
int num_l, num_r, start_l, start_r;
num_l = num_r = start_l = start_r = 0;
// Main block loop: scan block_size elements from each side recording the
// offsets of misplaced elements (num_X += !comp(...) is the branchless
// trick: the offset written at offsets_X[num_X] is only "kept" when the
// element is on the wrong side), then swap the recorded pairs.
while (last - first > 2 * block_size) {
// Fill up offset blocks with elements that are on the wrong side.
if (num_l == 0) {
start_l = 0;
Iter it = first;
// Unrolled 8x; block_size must be a multiple of 8 (see enum above).
for (unsigned char i = 0; i < block_size;) {
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
}
}
if (num_r == 0) {
start_r = 0;
Iter it = last;
for (unsigned char i = 0; i < block_size;) {
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
}
}
// Swap elements and update block sizes and first/last boundaries.
int num = std::min(num_l, num_r);
swap_offsets(first, last, offsets_l + start_l, offsets_r + start_r, num,
num_l == num_r);
num_l -= num;
num_r -= num;
start_l += num;
start_r += num;
if (num_l == 0)
first += block_size;
if (num_r == 0)
last -= block_size;
}
// Fewer than 2 * block_size elements remain: process the leftover
// (partially scanned) block plus the unknown middle region.
int l_size = 0, r_size = 0;
int unknown_left =
(int)(last - first) - ((num_r || num_l) ? block_size : 0);
if (num_r) {
// Handle leftover block by assigning the unknown elements to the other
// block.
l_size = unknown_left;
r_size = block_size;
} else if (num_l) {
l_size = block_size;
r_size = unknown_left;
} else {
// No leftover block, split the unknown elements in two blocks.
l_size = unknown_left / 2;
r_size = unknown_left - l_size;
}
// Fill offset buffers if needed.
if (unknown_left && !num_l) {
start_l = 0;
Iter it = first;
for (unsigned char i = 0; i < l_size;) {
offsets_l[num_l] = i++;
num_l += !comp(*it, pivot);
++it;
}
}
if (unknown_left && !num_r) {
start_r = 0;
Iter it = last;
for (unsigned char i = 0; i < r_size;) {
offsets_r[num_r] = ++i;
num_r += comp(*--it, pivot);
}
}
int num = std::min(num_l, num_r);
swap_offsets(first, last, offsets_l + start_l, offsets_r + start_r, num,
num_l == num_r);
num_l -= num;
num_r -= num;
start_l += num;
start_r += num;
if (num_l == 0)
first += l_size;
if (num_r == 0)
last -= r_size;
// We have now fully identified [first, last)'s proper position. Swap the
// last elements.
if (num_l) {
offsets_l += start_l;
while (num_l--)
std::iter_swap(first + offsets_l[num_l], --last);
first = last;
}
if (num_r) {
offsets_r += start_r;
while (num_r--)
std::iter_swap(last - offsets_r[num_r], first), ++first;
last = first;
}
// Put the pivot in the right place.
Iter pivot_pos = first - 1;
*begin = PDQSORT_PREFER_MOVE(*pivot_pos);
*pivot_pos = PDQSORT_PREFER_MOVE(pivot);
return std::make_pair(pivot_pos, already_partitioned);
}
// Partitions [begin, end) around pivot *begin using comparison function comp.
// Elements equal
// to the pivot are put in the right-hand partition. Returns the position of
// the pivot after
// partitioning and whether the passed sequence already was correctly
// partitioned. Assumes the
// pivot is a median of at least 3 elements and that [begin, end) is at least
// insertion_sort_threshold long.
// Hoare-style partition of [begin, end) around pivot *begin; elements equal
// to the pivot end up in the right partition.  Returns the pivot's final
// position and whether the input was already partitioned.
template <class Iter, class Compare>
inline std::pair<Iter, bool> partition_right(Iter begin, Iter end,
Compare comp)
{
typedef typename std::iterator_traits<Iter>::value_type T;
// Move pivot into local for speed.
T pivot(PDQSORT_PREFER_MOVE(*begin));
Iter first = begin;
Iter last = end;
// Find the first element greater than or equal than the pivot (the median
// of 3 guarantees
// this exists).
while (comp(*++first, pivot))
;
// Find the first element strictly smaller than the pivot. We have to guard
// this search if
// there was no element before *first.
if (first - 1 == begin)
while (first < last && !comp(*--last, pivot))
;
else
while (!comp(*--last, pivot))
;
// If the first pair of elements that should be swapped to partition are the
// same element,
// the passed in sequence already was correctly partitioned.
bool already_partitioned = first >= last;
// Keep swapping pairs of elements that are on the wrong side of the pivot.
// Previously
// swapped pairs guard the searches, which is why the first iteration is
// special-cased
// above.
while (first < last) {
std::iter_swap(first, last);
while (comp(*++first, pivot))
;
while (!comp(*--last, pivot))
;
}
// Put the pivot in the right place.
Iter pivot_pos = first - 1;
*begin = PDQSORT_PREFER_MOVE(*pivot_pos);
*pivot_pos = PDQSORT_PREFER_MOVE(pivot);
return std::make_pair(pivot_pos, already_partitioned);
}
// Similar function to the one above, except elements equal to the pivot are
// put to the left of
// the pivot and it doesn't check or return if the passed sequence already was
// partitioned.
// Since this is rarely used (the many equal case), and in that case pdqsort
// already has O(n)
// performance, no block quicksort is applied here for simplicity.
template <class Iter, class Compare>
inline Iter partition_left(Iter begin, Iter end, Compare comp)
{
  typedef typename std::iterator_traits<Iter>::value_type T;
  // Move the pivot into a local for speed.
  T pivot(PDQSORT_PREFER_MOVE(*begin));
  Iter first = begin;
  Iter last = end;
  // Backward scan: stop at the last element not greater than the pivot. The
  // pivot itself at *begin terminates the scan, so no bounds check is needed.
  while (comp(pivot, *--last))
    ;
  // Forward scan for the first element strictly greater than the pivot;
  // guard it only when the backward scan did not move past end - 1 (there
  // may be no element after *last to act as a sentinel).
  if (last + 1 == end)
    while (first < last && !comp(pivot, *++first))
      ;
  else
    while (!comp(pivot, *++first))
      ;
  // Swap misplaced pairs; previously swapped elements guard both scans.
  while (first < last) {
    std::iter_swap(first, last);
    while (comp(pivot, *--last))
      ;
    while (!comp(pivot, *++first))
      ;
  }
  // Move the pivot into its final slot: the end of the left (equal) side.
  Iter pivot_pos = last;
  *begin = PDQSORT_PREFER_MOVE(*pivot_pos);
  *pivot_pos = PDQSORT_PREFER_MOVE(pivot);
  return pivot_pos;
}
// Main pattern-defeating quicksort loop. Sorts [begin, end) with comp.
// `bad_allowed` counts how many highly unbalanced partitions are tolerated
// before falling back to heapsort; `leftmost` is true while no element
// precedes `begin` (i.e. *(begin - 1) is not a valid sentinel).
template <class Iter, class Compare, bool Branchless>
inline void pdqsort_loop(Iter begin, Iter end, Compare comp, int bad_allowed,
                         bool leftmost = true)
{
  typedef typename std::iterator_traits<Iter>::difference_type diff_t;
  // Use a while loop for tail recursion elimination.
  while (true) {
    diff_t size = end - begin;
    // Insertion sort is faster for small arrays.
    if (size < insertion_sort_threshold) {
      if (leftmost)
        insertion_sort(begin, end, comp);
      else
        // Non-leftmost partition: *(begin - 1) acts as a sentinel, so the
        // unguarded variant is safe.
        unguarded_insertion_sort(begin, end, comp);
      return;
    }
    // Choose pivot as median of 3 or pseudomedian of 9.
    diff_t s2 = size / 2;
    if (size > ninther_threshold) {
      sort3(begin, begin + s2, end - 1, comp);
      sort3(begin + 1, begin + (s2 - 1), end - 2, comp);
      sort3(begin + 2, begin + (s2 + 1), end - 3, comp);
      sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp);
      std::iter_swap(begin, begin + s2);
    } else
      sort3(begin + s2, begin, end - 1, comp);
    // If *(begin - 1) is the end of the right partition of a previous
    // partition operation there is no element in [begin, end) that is
    // smaller than *(begin - 1). Then if our pivot compares equal to
    // *(begin - 1) we change strategy, putting equal elements in the left
    // partition, greater elements in the right partition. We do not have to
    // recurse on the left partition, since it's sorted (all equal).
    if (!leftmost && !comp(*(begin - 1), *begin)) {
      begin = partition_left(begin, end, comp) + 1;
      continue;
    }
    // Partition and get results.
    std::pair<Iter, bool> part_result =
        Branchless ? partition_right_branchless(begin, end, comp)
                   : partition_right(begin, end, comp);
    Iter pivot_pos = part_result.first;
    bool already_partitioned = part_result.second;
    // Check for a highly unbalanced partition.
    diff_t l_size = pivot_pos - begin;
    diff_t r_size = end - (pivot_pos + 1);
    bool highly_unbalanced = l_size < size / 8 || r_size < size / 8;
    // If we got a highly unbalanced partition we shuffle elements to break
    // many patterns.
    if (highly_unbalanced) {
      // If we had too many bad partitions, switch to heapsort to guarantee
      // O(n log n).
      if (--bad_allowed == 0) {
        std::make_heap(begin, end, comp);
        std::sort_heap(begin, end, comp);
        return;
      }
      // Swap a few elements near the edges of each sizable partition with
      // elements a quarter in, to defeat adversarial/structured inputs.
      if (l_size >= insertion_sort_threshold) {
        std::iter_swap(begin, begin + l_size / 4);
        std::iter_swap(pivot_pos - 1, pivot_pos - l_size / 4);
        if (l_size > ninther_threshold) {
          std::iter_swap(begin + 1, begin + (l_size / 4 + 1));
          std::iter_swap(begin + 2, begin + (l_size / 4 + 2));
          std::iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1));
          std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2));
        }
      }
      if (r_size >= insertion_sort_threshold) {
        std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4));
        std::iter_swap(end - 1, end - r_size / 4);
        if (r_size > ninther_threshold) {
          std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4));
          std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4));
          std::iter_swap(end - 2, end - (1 + r_size / 4));
          std::iter_swap(end - 3, end - (2 + r_size / 4));
        }
      }
    } else {
      // If we were decently balanced and we tried to sort an already
      // partitioned sequence try to use insertion sort.
      if (already_partitioned &&
          partial_insertion_sort(begin, pivot_pos, comp) &&
          partial_insertion_sort(pivot_pos + 1, end, comp))
        return;
    }
    // Sort the left partition first using recursion and do tail recursion
    // elimination for the right-hand partition.
    pdqsort_loop<Iter, Compare, Branchless>(begin, pivot_pos, comp,
                                            bad_allowed, leftmost);
    begin = pivot_pos + 1;
    leftmost = false;
  }
}
}
template <class Iter, class Compare>
inline void pdqsort(Iter begin, Iter end, Compare comp)
{
if (begin == end)
return;
#if __cplusplus >= 201103L
pdqsort_detail::pdqsort_loop<
Iter, Compare, pdqsort_detail::is_default_compare<
typename std::decay<Compare>::type>::value &&
std::is_arithmetic<typename std::iterator_traits<
Iter>::value_type>::value>(
begin, end, comp, pdqsort_detail::log2(end - begin));
#else
pdqsort_detail::pdqsort_loop<Iter, Compare, false>(
begin, end, comp, pdqsort_detail::log2(end - begin));
#endif
}
// Convenience overload: sort [begin, end) in ascending order via operator<.
template <class Iter>
inline void pdqsort(Iter begin, Iter end)
{
  pdqsort(begin, end,
          std::less<typename std::iterator_traits<Iter>::value_type>());
}
// Sort [begin, end) with `comp`, unconditionally using the branchless
// partitioning scheme; the caller asserts the comparison is cheap enough.
template <class Iter, class Compare>
inline void pdqsort_branchless(Iter begin, Iter end, Compare comp)
{
  if (begin != end)
    pdqsort_detail::pdqsort_loop<Iter, Compare, true>(
        begin, end, comp, pdqsort_detail::log2(end - begin));
}
// Convenience overload: branchless sort in ascending order via operator<.
template <class Iter>
inline void pdqsort_branchless(Iter begin, Iter end)
{
  typedef typename std::iterator_traits<Iter>::value_type value_t;
  pdqsort_branchless(begin, end, std::less<value_t>());
}
#undef PDQSORT_PREFER_MOVE
#endif

View File

@ -0,0 +1,18 @@
#ifndef PYTHONIC_UTILS_RESERVE_HPP
#define PYTHONIC_UTILS_RESERVE_HPP
#include "pythonic/include/utils/reserve.hpp"
PYTHONIC_NS_BEGIN
namespace utils
{
  // Capacity-reservation hook: hint that `Container` will receive as many
  // elements as `From` holds. The primary template deliberately does
  // nothing; container types that support pre-allocation provide a
  // specialization elsewhere that forwards to their own reserve().
  template <class Container, class From>
  void reserve(Container &, From &&) // do nothing unless specialized
  {
  }
}
PYTHONIC_NS_END
#endif

View File

@ -0,0 +1,6 @@
#ifndef PYTHONIC_UTILS_SEQ_HPP
#define PYTHONIC_UTILS_SEQ_HPP
#include "pythonic/include/utils/seq.hpp"
#endif

View File

@ -0,0 +1,149 @@
#ifndef PYTHONIC_UTILS_SHARED_REF_HPP
#define PYTHONIC_UTILS_SHARED_REF_HPP
#include "pythonic/include/utils/shared_ref.hpp"
#include <memory>
#include <utility>
#include <unordered_map>
#ifdef _OPENMP
#include <atomic>
#endif
PYTHONIC_NS_BEGIN
namespace utils
{
  /** Light-weight shared_ptr like-class
   *
   * Unlike std::shared_ptr, it allocates the memory itself using new.
   */

  // Control block: holds the payload `ptr` in place, a refcount starting at
  // 1, and an optional foreign handle (used with Py_DECREF in dispose(), so
  // presumably a CPython object — see external()).
  template <class T>
  template <class... Types>
  shared_ref<T>::memory::memory(Types &&... args)
      : ptr(std::forward<Types>(args)...), count(1), foreign(nullptr)
  {
  }

  // "Empty" constructors: build a shared_ref owning nothing (mem == null).
  template <class T>
  shared_ref<T>::shared_ref(no_memory const &) noexcept : mem(nullptr)
  {
  }

  template <class T>
  shared_ref<T>::shared_ref(no_memory &&) noexcept : mem(nullptr)
  {
  }

  // Allocate a control block, forwarding args to T's constructor.
  // std::nothrow: on allocation failure mem stays null instead of throwing.
  template <class T>
  template <class... Types>
  shared_ref<T>::shared_ref(Types &&... args)
      : mem(new (std::nothrow) memory(std::forward<Types>(args)...))
  {
  }

  // Move: steal the control block; the source becomes empty.
  template <class T>
  shared_ref<T>::shared_ref(shared_ref<T> &&p) noexcept : mem(p.mem)
  {
    p.mem = nullptr;
  }

  // Copy: share the control block and bump the refcount (if non-empty).
  template <class T>
  shared_ref<T>::shared_ref(shared_ref<T> const &p) noexcept : mem(p.mem)
  {
    if (mem)
      acquire();
  }

  // Non-const copy overload; same sharing semantics as the const version.
  template <class T>
  shared_ref<T>::shared_ref(shared_ref<T> &p) noexcept : mem(p.mem)
  {
    if (mem)
      acquire();
  }

  template <class T>
  shared_ref<T>::~shared_ref() noexcept
  {
    dispose();
  }

  template <class T>
  void shared_ref<T>::swap(shared_ref<T> &rhs) noexcept
  {
    using std::swap;
    swap(mem, rhs.mem);
  }

  // Copy-and-swap assignment: `p` is taken by value, so this single
  // overload serves both copy- and move-assignment and is self-assign safe.
  template <class T>
  shared_ref<T> &shared_ref<T>::operator=(shared_ref<T> p) noexcept
  {
    swap(p);
    return *this;
  }

  // Dereference the owned payload. Precondition: non-empty (mem != null).
  template <class T>
  T &shared_ref<T>::operator*() const noexcept
  {
    assert(mem);
    return mem->ptr;
  }

  template <class T>
  T *shared_ref<T>::operator->() const noexcept
  {
    assert(mem);
    return &mem->ptr;
  }

  // Identity comparison: equal iff both refs share the same control block.
  template <class T>
  bool shared_ref<T>::operator!=(shared_ref<T> const &other) const noexcept
  {
    return mem != other.mem;
  }

  template <class T>
  bool shared_ref<T>::operator==(shared_ref<T> const &other) const noexcept
  {
    return mem == other.mem;
  }

  // Attach a foreign handle to the control block; it is released
  // (Py_DECREF) when the last reference is dropped — see dispose().
  template <class T>
  void shared_ref<T>::external(extern_type obj_ptr)
  {
    assert(mem);
    mem->foreign = obj_ptr;
  }

  template <class T>
  inline extern_type shared_ref<T>::get_foreign()
  {
    assert(mem);
    return mem->foreign;
  }

  // Drop one reference; the last owner releases the foreign handle (when
  // built as a Python module) and deletes the control block.
  template <class T>
  void shared_ref<T>::dispose()
  {
    if (mem && --mem->count == 0) {
#ifdef ENABLE_PYTHON_MODULE
      if (mem->foreign) {
        Py_DECREF(mem->foreign);
      }
#endif
      delete mem;
      mem = nullptr;
    }
  }

  // Add one reference. Precondition: non-empty (mem != null).
  template <class T>
  void shared_ref<T>::acquire()
  {
    assert(mem);
    ++mem->count;
  }
}
PYTHONIC_NS_END
#endif

View File

@ -0,0 +1,7 @@
#ifndef PYTHONIC_UTILS_TAGS_HPP
#define PYTHONIC_UTILS_TAGS_HPP
#include "pythonic/include/utils/tags.hpp"
#include "pythonic/types/traits.hpp"
#endif

View File

@ -0,0 +1,28 @@
#ifndef PYTHRAN_UTILS_YIELD_HPP
#define PYTHRAN_UTILS_YIELD_HPP
#include "pythonic/include/utils/yield.hpp"
/*
* This contains base class for yielders
*/
#include "pythonic/types/generator.hpp"
PYTHONIC_NS_BEGIN
// Base-class state for generated generator classes: __generator_state is an
// integer presumably encoding the resume point of the generator state
// machine (see pythonic/types/generator.hpp); 0 means freshly constructed.
yielder::yielder() : __generator_state(0)
{
}

// Yielders compare by state only; inequality of states is what iteration
// uses to decide whether the generator has reached its end marker.
bool yielder::operator!=(yielder const &other) const
{
  return __generator_state != other.__generator_state;
}

bool yielder::operator==(yielder const &other) const
{
  return __generator_state == other.__generator_state;
}
PYTHONIC_NS_END
#endif