Example no. 1
def choices(population, weights=None, *, cum_weights=None, k=1):
    """Return a k sized list of population elements chosen with replacement.
    If the relative weights or cumulative weights are not specified,
    the selections are made with equal probability.

    from: https://github.com/python/cpython/blob/master/Lib/random.py
    """
    n = len(population)
    if cum_weights is None:
        if weights is None:
            _int = int
            n += 0.0  # convert to float for a small speed improvement
            return [
                population[_int(random.random() * n)]
                for i in _repeat(None, k)
            ]
        cum_weights = list(_accumulate(weights))
    elif weights is not None:
        raise TypeError('Cannot specify both weights and cumulative weights')
    if len(cum_weights) != n:
        raise ValueError('The number of weights does not match the population')
    total = cum_weights[-1] + 0.0  # convert to float
    if total <= 0.0:
        raise ValueError('Total of weights must be greater than zero')
    bisect = _bisect
    hi = n - 1
    return [
        population[bisect(cum_weights,
                          random.random() * total, 0, hi)]
        for i in _repeat(None, k)
    ]
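
The cumulative-weights-plus-bisect technique above can be exercised on its own; a minimal, self-contained sketch (the helper name and sample data are illustrative, not part of the module above):

import random
from bisect import bisect
from itertools import accumulate, repeat

def weighted_choices(population, weights, k=1):
    # cumulative weights turn weighted selection into a bisect lookup
    cum_weights = list(accumulate(weights))      # e.g. [7, 7, 10] for [7, 0, 3]
    total = cum_weights[-1] + 0.0                # convert to float
    hi = len(population) - 1
    return [population[bisect(cum_weights, random.random() * total, 0, hi)]
            for _ in repeat(None, k)]

print(weighted_choices(['a', 'b', 'c'], [7, 0, 3], k=5))  # 'b' never appears
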
Example no. 2
    def choices(self, population, weights=None, *, cum_weights=None, k=1):
        """Return a k sized list of population elements chosen with replacement.

        If the relative weights or cumulative weights are not specified,
        the selections are made with equal probability.

        """
        random = self.random
        n = len(population)
        if cum_weights is None:
            if weights is None:
                floor = _floor
                n += 0.0    # convert to float for a small speed improvement
                return [population[floor(random() * n)] for i in _repeat(None, k)]
            cum_weights = list(_accumulate(weights))
        elif weights is not None:
            raise TypeError('Cannot specify both weights and cumulative weights')
        if len(cum_weights) != n:
            raise ValueError('The number of weights does not match the population')
        total = cum_weights[-1] + 0.0   # convert to float
        if total <= 0.0:
            raise ValueError('Total of weights must be greater than zero')
        if not _isfinite(total):
            raise ValueError('Total of weights must be finite')
        bisect = _bisect
        hi = n - 1
        return [population[bisect(cum_weights, random() * total, 0, hi)]
                for i in _repeat(None, k)]
Example no. 3
def choices(population, weights=None, *, cum_weights=None, k=1):
    """Return a k sized list of population elements chosen with replacement.
    If the relative weights or cumulative weights are not specified,
    the selections are made with equal probability.
    """
    n = len(population)
    if cum_weights is None:
        if weights is None:
            _int = int
            n += 0.0  # convert to float for a small speed improvement
            return [
                population[_int(random.random() * n)]
                for i in _repeat(None, k)
            ]
        cum_weights = list(_accumulate(weights))
    elif weights is not None:
        raise TypeError("Cannot specify both weights and cumulative weights")
    if len(cum_weights) != n:
        raise ValueError("The number of weights does not match the population")
    bisect = _bisect
    total = cum_weights[-1] + 0.0  # convert to float
    hi = n - 1
    return [
        population[bisect(cum_weights,
                          random.random() * total, 0, hi)]
        for i in _repeat(None, k)
    ]
Example no. 4
 def __init__(self, data, error=None, meta=None):
     super().__init__()
     if error is None:
         error = _repeat(None)
     if meta is None:
         meta = _repeat(None)
     for d, e, m in zip(data, error, meta):
         self.children.append(Coordinate(d, e, m))
Example no. 5
 def columns_font(self):
     try:
         cf = self._columns_font
     except AttributeError:
         return _repeat('')
     if isinstance(cf, str):
         return _repeat(cf)
     else:
         return cf
Example no. 6
def times(A, B):
    print('times', A, B)
    if hasattr(A, 'val'): A = A.val
    if hasattr(B, 'val'): B = B.val
    if not hasattr(A, '__len__'):
        if not hasattr(B, '__len__'): return A*B
        else: A = _repeat(A, len(B))
    elif not hasattr(B, '__len__'):
        B = _repeat(B, len(A))
    res = [ a*b for a, b in _izip(A, B)]
    return res
Example no. 7
def times(A, B):
    print('times', A, B)
    if hasattr(A, 'val'): A = A.val
    if hasattr(B, 'val'): B = B.val
    if not hasattr(A, '__len__'):
        if not hasattr(B, '__len__'): return A * B
        else: A = _repeat(A, len(B))
    elif not hasattr(B, '__len__'):
        B = _repeat(B, len(A))
    res = [a * b for a, b in _izip(A, B)]
    return res
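
The same scalar-broadcasting idea, in current Python (illustrative data; zip stands in for _izip):

from itertools import repeat

A, B = 2, [1, 2, 3]
A = repeat(A, len(B))                    # broadcast the scalar to B's length
print([a * b for a, b in zip(A, B)])     # [2, 4, 6]
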
Example no. 8
 def columns_alignment(self):
     try:
         ca = self._columns_alignment
     except AttributeError:
         return _repeat(0.5)
     d = {
         'LEFT': 0.0,
         'CENTER': 0.5,
         'RIGHT': 1.0,
     }
     if isinstance(ca, str):
         return _repeat(d[ca])
     else:
         return [d[x] for x in ca]
Example no. 9
    def select(self, *comptypes, exclude=None):
        """Iterate over entity ids and their corresponding components. Yields tuples of the form `(eid, (compA, compB, ...))` where `compA`, `compB`, ... are of the given component types and belong to the entity with entity id eid. If no component types are given, iterate over all entities. If *exclude* is not *None*, entities with component types listed in *exclude* will not be considered. Raises *ValueError* if *exclude* contains component types that are also explicitly included."""

        # raise ValueError if trying to exclude component types that are also included
        if exclude and any(ct in exclude for ct in comptypes):
            raise ValueError(
                f"excluding explicitely included component types: {', '.join(str(x) for x in set(comptypes).intersection(exclude))}"
            )

        # collect archetypes that should be included and archetypes that should be excluded
        incarchetypes = set.intersection(
            *[self.archetypemap.get(ct, set())
              for ct in comptypes]) if comptypes else set(self.chunkmap.keys())
        excarchetypes = set.union(
            *[self.archetypemap.get(ct, set())
              for ct in exclude]) if exclude else set()

        # iterate over all included archetype that are not excluded
        # the iteration is reversed, because this will yield better performance when calling e.g. scene.remove() on the result.
        archetypes = incarchetypes - excarchetypes
        if comptypes:
            for archetype in archetypes:
                eidlist, comptypemap = self.chunkmap[archetype]
                complists = [reversed(comptypemap[ct]) for ct in comptypes]
                yield from zip(reversed(eidlist), zip(*complists))
        else:
            for archetype in archetypes:
                eidlist, _ = self.chunkmap[archetype]
                yield from zip(reversed(eidlist), _repeat(()))
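
The final branch pairs every entity id with an empty component tuple; the zip/repeat combination in isolation, with illustrative data:

from itertools import repeat

eidlist = [1, 2, 3]
print(list(zip(reversed(eidlist), repeat(()))))   # [(3, ()), (2, ()), (1, ())]
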
Example no. 10
def choices(population, weights=None, *, cum_weights=None, k=1):
    n = len(population)
    if cum_weights is None:
        if weights is None:
            _int = int
            n += 0.0  # convert to float for a small speed improvement
            return [population[_int(random.random() * n)] for i in _repeat(None, k)]
        cum_weights = list(_accumulate(weights))
    elif weights is not None:
        raise TypeError('Cannot specify both weights and cumulative weights')
    if len(cum_weights) != n:
        raise ValueError('The number of weights does not match the population')
    bisect = _bisect
    total = cum_weights[-1] + 0.0  # convert to float
    hi = n - 1
    return [population[bisect(cum_weights, random.random() * total, 0, hi)]
            for i in _repeat(None, k)]
Example no. 11
 def __ge__(self, other):
     from array import array
     if _isscalar(other):
         if hasattr(other, '__len__'): other = _cycle(other)
         else: other = _repeat(other)
     na = _marray('bool', self.msize, 
                  array(_dtype2array['bool'],
                        (x >= y for x, y in _izip(self, other))))
     return na
Example no. 12
 def __elmul__(self, him):
     from array import array
     if _isscalar(him):
         if hasattr(him, '__len__'): him = _cycle(him)
         else: him = _repeat(him)
     na = _marray(self.dtype, self.msize, 
                  array(_dtype2array[self.dtype], 
                        (x*y for x, y in _izip(self._a, him))))
     return na
Example no. 13
 def __setitem1__(self, i, val):
     # determine the size of the new array
     nshp = _ndshape1(self.msize, *i)
     i = (isinstance(x, _marray) and iter(x._a) or x for x in i)
     ins = list(_ndilin1(self.msize, *i))
     if _isscalar(val):
         if hasattr(val, '__len__'): val = _cycle(val)
         else: val = _repeat(val)
     for j, v in _izip(ins, val):
         self._a[j] = v
Example no. 14
 def __ge__(self, other):
     from array import array
     if _isscalar(other):
         if hasattr(other, '__len__'): other = _cycle(other)
         else: other = _repeat(other)
     na = _marray(
         'bool', self.msize,
         array(_dtype2array['bool'],
               (x >= y for x, y in _izip(self, other))))
     return na
Example no. 15
 def __elmul__(self, him):
     from array import array
     if _isscalar(him):
         if hasattr(him, '__len__'): him = _cycle(him)
         else: him = _repeat(him)
     na = _marray(
         self.dtype, self.msize,
         array(_dtype2array[self.dtype],
               (x * y for x, y in _izip(self._a, him))))
     return na
Example no. 16
 def __setitem1__(self, i, val):
     # determine the size of the new array
     nshp = _ndshape1(self.msize, *i)
     i = ( isinstance(x, _marray) and iter(x._a) or x for x in i )
     ins = list(_ndilin1(self.msize, *i))
     if _isscalar(val):
         if hasattr(val, '__len__'): val = _cycle(val)
         else: val = _repeat(val)
     for j, v in _izip(ins, val):
         self._a[j] = v
Example no. 17
def p010(ceiling):
	numbers = list(range(2, ceiling))
	i = 0
	while i < ceiling - 2:
		jump = numbers[i]
		numbers[i + jump:ceiling - 2:jump] = _repeat(0, _ceil((len(numbers) - i) / jump) - 1)
		i += 1
		while i < ceiling - 2 and numbers[i] == 0:
			i += 1
	return sum(numbers)
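
A quick sanity check of the sieve above, assuming _repeat is itertools.repeat and _ceil is math.ceil:

from itertools import repeat as _repeat
from math import ceil as _ceil

print(p010(10))   # 17, i.e. 2 + 3 + 5 + 7
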
Example no. 18
 def _zip_longest(*args, **kwds):
     # izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
     fillvalue = kwds.get('fillvalue')
     def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
         yield counter()         # yields the fillvalue, or raises IndexError
     fillers = _repeat(fillvalue)
     iters = [_chain(it, sentinel(), fillers) for it in args]
     try:
         for tup in izip(*iters):
             yield tup
     except IndexError:
         pass
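
For comparison, the stdlib equivalent reproduces the behaviour described in the recipe's own comment:

from itertools import zip_longest

print(list(zip_longest('ABCD', 'xy', fillvalue='-')))
# [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')]
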
Example no. 20
def extract_data_report(filename):
    """
    @param filename: name of data report to open
    @type filename: str
    @return: headers and data from data report
    @rtype: (list[str], list[str])
    """
    with open(filename, 'r') as f:
        headers = f.readline().split(',')
        data = f.read().splitlines()

    data = _zip_longest(*map(str.split, data, _repeat(',')), fillvalue='')

    return headers, data
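
The map/_repeat pairing passes the same separator to str.split for every row, and _zip_longest then transposes the result; with illustrative data:

from itertools import repeat, zip_longest

rows = ['1,2,3', '4,5']
columns = zip_longest(*map(str.split, rows, repeat(',')), fillvalue='')
print(list(columns))   # [('1', '4'), ('2', '5'), ('3', '')]
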
Example no. 21
def _test_generator(n, func, args):
    from statistics import stdev, fmean as mean
    from time import perf_counter

    t0 = perf_counter()
    data = [func(*args) for i in _repeat(None, n)]
    t1 = perf_counter()

    xbar = mean(data)
    sigma = stdev(data, xbar)
    low = min(data)
    high = max(data)

    print(f'{t1 - t0:.3f} sec, {n} times {func.__name__}')
    print('avg %g, stddev %g, min %g, max %g\n' % (xbar, sigma, low, high))
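
A sketch of how such a helper is typically driven (the argument tuples are illustrative; random is the standard module):

import random

_test_generator(2000, random.random, ())
_test_generator(2000, random.normalvariate, (0.0, 1.0))
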
Example no. 22
    def choices(self, population, weights=None, *, cum_weights=None, k=1):
        """Return a k sized list of population elements chosen with replacement.

        If the relative weights or cumulative weights are not specified,
        the selections are made with equal probability.

        """
        random = self.random
        n = len(population)
        if cum_weights is None:
            if weights is None:
                _int = int
                n += 0.0    # convert to float for a small speed improvement
                return [population[_int(random() * n)] for i in _repeat(None, k)]
            cum_weights = list(_accumulate(weights))
        elif weights is not None:
            raise TypeError('Cannot specify both weights and cumulative weights')
        if len(cum_weights) != n:
            raise ValueError('The number of weights does not match the population')
        bisect = _bisect
        total = cum_weights[-1] + 0.0   # convert to float
        hi = n - 1
        return [population[bisect(cum_weights, random() * total, 0, hi)]
                for i in _repeat(None, k)]
Example no. 23
 class Transform(Derived):
     def __init__(self, operator, /, *operands, **kwargs):
         self.operands, self.operator = operands, operator
         isdim = tuple(isinstance(op, Dimension) for op in operands)
         nisdim = isdim.count(True)
         if all(isdim):
             self.iter_fn = _partial(map, operator, *operands)
         elif not nisdim:
             raise ValueError("No dims in input!")
         else:
             if nisdim == 1:
                 self.iterlen = operands[isdim.index(True)].iterlen
             getops = lambda: (op
                               if isinstance(op, Dimension) else _repeat(op)
                               for op in operands)
             self.iter_fn = _partial(map, operator, *getops())
         super().__init__(operator, *operands, **kwargs)
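
The getops trick broadcasts non-Dimension operands by wrapping them in repeat(); the core idea in isolation (operator and data are illustrative):

from itertools import repeat
import operator

dim = [1, 2, 3]                                  # stands in for a Dimension's values
scalar = 10                                      # a non-Dimension operand
print(list(map(operator.add, dim, repeat(scalar))))   # [11, 12, 13]
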
Example no. 24
 def extend() -> None:
     k = segmented_sieve.end_segment
     add = 1
     p = segmented_sieve.primes[k]
     length = len(segmented_sieve.primes)
     while 2 * p * add + add**2 < 1000 and k + add + 1 < length:
         add += 1
     p, q = segmented_sieve.primes[k], segmented_sieve.primes[k + add]
     segment_min = p * p
     segment_len = q * q - segment_min
     is_prime = [True] * segment_len
     for pk in segmented_sieve.primes[:k + add]:
         start = _lcm_ngem(pk, segment_min)
         prime_count = _math.ceil(
             (segment_len - (start - segment_min)) / pk)
         is_prime[start - segment_min::pk] = _repeat(False, prime_count)
     segmented_sieve.primes.extend(
         _compress(range(segment_min, q * q), is_prime))
     segmented_sieve.end_segment += add
     segmented_sieve.searched_till = segmented_sieve.primes[k + add]**2 - 1
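
The slice assignment that marks composites relies on repeat() yielding exactly prime_count values; the same pattern in isolation:

from itertools import repeat

is_prime = [True] * 10
is_prime[0::3] = repeat(False, 4)    # marks indices 0, 3, 6, 9
print(is_prime)
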
Example no. 25
    def __init__(self, f=None, g=None, *args, **kwargs):
        super(FISTA, self).__init__(*args, **kwargs)

        if f.gradient.shape[1] is None:
            assert g.gradient.shape[1] is not None
            self.x = _np.zeros(g.gradient.shape[1])
        elif g.gradient.shape[1] is None:
            assert f.gradient.shape[1] is not None
            self.x = _np.zeros(f.gradient.shape[1])
        else:
            assert f.gradient.shape[1] == g.gradient.shape[1]
            self.x = _np.zeros(f.gradient.shape[1])

        self._f = f
        self._g = g
        self._y = self.x

        ts = _accumulate(_repeat(1.0), lambda t, el:
                         0.5 * (1 + _np.sqrt(1 + 4 * t ** 2)))
        self._alphas = iter(_diff(ts, lambda t1, t2: (t1 - 1) / t2))
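
The accumulate-over-repeat pattern lazily generates the FISTA momentum sequence t_{k+1} = (1 + sqrt(1 + 4*t_k**2)) / 2; a look at the first few terms:

import math
from itertools import accumulate, islice, repeat

ts = accumulate(repeat(1.0), lambda t, _: 0.5 * (1 + math.sqrt(1 + 4 * t ** 2)))
print(list(islice(ts, 4)))   # [1.0, ~1.618, ~2.194, ~2.750]
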
Example no. 26
def blobs2feats_fitted(blobA, blobB, blobs, tmin, tmax, rmin, rmax):
    """ Generate a stream of features corresponding to limits.
    
        Yields 5-element tuples: indexes for three blobs followed by feature ratio, theta.
        
        Used when two blobs are known to be part of the target feature,
        but it's not clear which specific points in the feature they will
        correspond to. Performs a simple walk over all six possibilities
        using blobs2feats_limited().
    """
    lim = tmin, tmax, rmin, rmax
    ABL, BAL, ALB, BLA, LAB, LBA = 0, 1, 2, 3, 4, 5
    
    matches = chain(izip(_repeat(ABL), blobs2feats_limited([blobA], [blobB], blobs, *lim)),
                    izip(_repeat(BAL), blobs2feats_limited([blobB], [blobA], blobs, *lim)),
                    izip(_repeat(ALB), blobs2feats_limited([blobA], blobs, [blobB], *lim)),
                    izip(_repeat(BLA), blobs2feats_limited([blobB], blobs, [blobA], *lim)),
                    izip(_repeat(LAB), blobs2feats_limited(blobs, [blobA], [blobB], *lim)),
                    izip(_repeat(LBA), blobs2feats_limited(blobs, [blobB], [blobA], *lim)))
    
    for (arrangement, match_tuple) in matches:
        if arrangement == ABL:
            i, j, k = blobs.index(blobA), blobs.index(blobB), match_tuple[2]
        elif arrangement == BAL:
            i, j, k = blobs.index(blobB), blobs.index(blobA), match_tuple[2]
        elif arrangement == ALB:
            i, j, k = blobs.index(blobA), match_tuple[1], blobs.index(blobB)
        elif arrangement == BLA:
            i, j, k = blobs.index(blobB), match_tuple[1], blobs.index(blobA)
        elif arrangement == LAB:
            i, j, k = match_tuple[0], blobs.index(blobA), blobs.index(blobB)
        elif arrangement == LBA:
            i, j, k = match_tuple[0], blobs.index(blobB), blobs.index(blobA)

        ratio, theta = match_tuple[3:5]
        
        yield i, j, k, ratio, theta
Example no. 27
        n = len(population)
        if cum_weights is None:
            if weights is None:
                _int = int
                n += 0.0    # convert to float for a small speed improvement
                return [population[_int(random() * n)] for i in _repeat(None, k)]
            cum_weights = list(_accumulate(weights))
        elif weights is not None:
            raise TypeError('Cannot specify both weights and cumulative weights')
        if len(cum_weights) != n:
            raise ValueError('The number of weights does not match the population')
        bisect = _bisect
        total = cum_weights[-1] + 0.0   # convert to float
        hi = n - 1
        return [population[bisect(cum_weights, random() * total, 0, hi)]
                for i in _repeat(None, k)]

## -------------------- real-valued distributions  -------------------

## -------------------- uniform distribution -------------------
Example no. 28
def segments_intersections(segments: _Sequence[_Segment],
                           *,
                           context: _Optional[_Context] = None
                           ) -> _Dict[_Tuple[int, int], _Intersection]:
    """
    Returns mapping between intersection points
    and corresponding segments indices.

    Based on Bentley-Ottmann algorithm.

    Time complexity:
        ``O(len(segments) * log len(segments) + len(intersections))``
    Memory complexity:
        ``O(len(segments) + len(intersections))``
    Reference:
        https://en.wikipedia.org/wiki/Bentley%E2%80%93Ottmann_algorithm

    :param segments: sequence of segments.
    :param context: geometrical context.
    :returns:
        mapping between intersection points and corresponding segments indices.

    >>> from ground.base import get_context
    >>> context = get_context()
    >>> Point, Segment = context.point_cls, context.segment_cls
    >>> segments_intersections([]) == {}
    True
    >>> segments_intersections([Segment(Point(0, 0), Point(2, 2))]) == {}
    True
    >>> segments_intersections([Segment(Point(0, 0), Point(2, 0)),
    ...                         Segment(Point(0, 2), Point(2, 2))]) == {}
    True
    >>> (segments_intersections([Segment(Point(0, 0), Point(2, 2)),
    ...                          Segment(Point(0, 0), Point(2, 2))])
    ...  == {(0, 1): (Point(0, 0), Point(2, 2))})
    True
    >>> (segments_intersections([Segment(Point(0, 0), Point(2, 2)),
    ...                          Segment(Point(2, 0), Point(0, 2))])
    ...  == {(0, 1): (Point(1, 1),)})
    True
    """
    left_parts_ids, right_parts_ids = {}, {}
    left_tangents, right_tangents = {}, {}
    for event in _sweep(
            segments, context=_get_context() if context is None else context):
        if event.tangents:
            (left_tangents.setdefault(event.start, {}).setdefault(
                event.end,
                set()).update(tangent.end for tangent in event.tangents))
        if event.right.tangents:
            (right_tangents.setdefault(event.end, {}).setdefault(
                event.start,
                set()).update(tangent.end for tangent in event.right.tangents))
        for start, ends_ids in event.parts_ids.items():
            for end, ids in ends_ids.items():
                (left_parts_ids.setdefault(start,
                                           {}).setdefault(end,
                                                          set()).update(ids))
                (right_parts_ids.setdefault(end,
                                            {}).setdefault(start,
                                                           set()).update(ids))
    discrete = {}  # type: _Dict[_Tuple[int, int], _Tuple[_Point]]
    for intersection_point, ends_tangents_ends in left_tangents.items():
        left_intersection_point_ids, right_intersection_point_ids = (
            left_parts_ids.get(intersection_point),
            right_parts_ids.get(intersection_point))
        for end, tangents_ends in ends_tangents_ends.items():
            ids = left_intersection_point_ids[end]
            for tangent_end in tangents_ends:
                tangent_ids = (left_intersection_point_ids[tangent_end]
                               if intersection_point < tangent_end else
                               right_intersection_point_ids[tangent_end])
                ids_pairs = [
                    _to_sorted_pair(id_, tangent_id)
                    for id_, tangent_id in _product(ids -
                                                    tangent_ids, tangent_ids -
                                                    ids)
                ]
                discrete.update(zip(ids_pairs, _repeat(
                    (intersection_point, ))))
    for intersection_point, starts_tangents_ends in right_tangents.items():
        left_intersection_point_ids, right_intersection_point_ids = (
            left_parts_ids.get(intersection_point),
            right_parts_ids.get(intersection_point))
        for start, tangents_ends in starts_tangents_ends.items():
            ids = right_intersection_point_ids[start]
            for tangent_end in tangents_ends:
                tangent_ids = (left_intersection_point_ids[tangent_end]
                               if intersection_point < tangent_end else
                               right_intersection_point_ids[tangent_end])
                ids_pairs = [
                    _to_sorted_pair(id_, tangent_id)
                    for id_, tangent_id in _product(ids -
                                                    tangent_ids, tangent_ids -
                                                    ids)
                ]
                discrete.update(zip(ids_pairs, _repeat(
                    (intersection_point, ))))
    continuous = {}  # type: _Dict[_Tuple[int, int], _Tuple[_Point, _Point]]
    for start, ends_ids in left_parts_ids.items():
        for end, ids in ends_ids.items():
            for ids_pair in _to_pairs_combinations(sorted(ids)):
                if ids_pair in continuous:
                    prev_start, prev_end = continuous[ids_pair]
                    endpoints = min(prev_start, start), max(prev_end, end)
                else:
                    endpoints = (start, end)
                continuous[ids_pair] = endpoints
    return {**discrete, **continuous}
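
The zip/_repeat pairing used for the discrete intersections associates every ids pair with the same point tuple; in isolation, with illustrative keys and point:

from itertools import repeat

discrete = {}
ids_pairs = [(0, 1), (0, 2)]
discrete.update(zip(ids_pairs, repeat(((1, 1),))))   # same point for every pair
print(discrete)   # {(0, 1): ((1, 1),), (0, 2): ((1, 1),)}
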
Example no. 29
def OBB(vecs, r_indices=None, eps=1e-6):
    """Convex hull を用いたOBBを返す。
    Z->Y->Xの順で長さが最少となる軸を求める。
    :param vecs: list of 2D/3D array
    :type vecs: list | tuple | numpy.ndarray
    :param r_indices: listを渡すとconvexhullの結果を格納する
    :type r_indices: None | list
    :param eps: 種々の計算の閾値
    :return:
        (matrix, obb_size)
        matrix:
            OBBの回転と中心座標を表す。OBBが二次元ベクトルの場合は3x3、
            三次元なら4x4。
        obb_size:
            OBBの各軸の長さ。OBBと同じ次元。
    :rtype: (numpy.ndarray, numpy.ndarray)
    """

    if len(vecs) == 0:
        return None, None

    # if not isinstance(vecs, _np.ndarray):
    #     vecs = _array(vecs, dtype=_np.float64)
    vecs = _np.asanyarray(vecs, dtype=_np.float64)

    # 2D ----------------------------------------------------------------------
    if len(vecs[0]) == 2:
        mat = _np.identity(3)
        bb_size = _array([0., 0.])

        indices = _convex_hull_2d(vecs, eps)
        if r_indices:
            r_indices[:] = indices

        if len(indices) == 1:
            mat[:2, 2] = vecs[0]
        elif len(indices) == 2:
            v1 = vecs[indices[0]]
            v2 = vecs[indices[1]]
            xaxis = _normalized(v2 - v1)
            angle = _math.atan2(xaxis[1], xaxis[0])
            s = _math.sin(angle)
            c = _math.cos(angle)
            mat2 = _array([[c, -s], [s, c]])
            mat[:2, 0] = mat2[:, 0]
            mat[:2, 1] = mat2[:, 1]
            mat[:2, 2] = (v1 + v2) / 2
            bb_size[0] = _norm(v2 - v1)
        else:
            yaxis = _closest_axis_on_plane(vecs, indices)
            angle = _math.atan2(yaxis[1], yaxis[0]) - _math.pi / 2  # X axis
            s = _math.sin(angle)
            c = _math.cos(angle)
            mat2 = _array([[c, -s], [s, c]])
            s = _math.sin(-angle)
            c = _math.cos(-angle)
            imat2 = _array([[c, -s], [s, c]])

            rotvecs = _mul_mat_array(imat2, vecs)
            min_vec = rotvecs.min(axis=0)
            max_vec = rotvecs.max(axis=0)
            bb_size = max_vec - min_vec
            loc = (min_vec + max_vec) / 2

            mat[:2, 0] = mat2[:, 0]
            mat[:2, 1] = mat2[:, 1]
            mat[:2, 2] = _mul_mat_vec(mat2, loc)
        return mat, bb_size

    # 3D ----------------------------------------------------------------------
    pool = _mp.Pool()

    mat = _np.identity(4)
    bb_size = _array([0., 0., 0.])

    indices = _convex_hull_3d(vecs, eps)

    if r_indices:
        r_indices[:] = indices

    if isinstance(indices[0], int):  # 2d
        if len(indices) == 1:
            mat[:3, 3] = vecs[0]
            return mat, bb_size

        elif len(indices) == 2:
            # collinear
            v1 = vecs[indices[0]]
            v2 = vecs[indices[1]]
            xaxis = _normalized(v2 - v1)
            quat = _rotation_difference_v3v3(_array([1., 0., 0.]), xaxis)
            mat = _np.identity(4)
            mat[:3, :3] = _quat_to_mat3(quat)
            mat[:3, 3] = (v1 + v2) / 2
            bb_size[0] = _norm(v2 - v1)
            return mat, bb_size

        else:
            # coplanar
            medium = _np.sum(vecs, axis=0) / len(vecs)
            v1 = vecs[_norm(vecs - medium, axis=1).argmax()]
            v2 = vecs[_norm(vecs - v1, axis=1).argmax()]
            line = v2 - v1
            v3 = vecs[_norm(_cross(line, vecs - v1), axis=1).argmax()]

            zaxis = _normal_tri(v1, v2, v3)
            if zaxis[2] < 0.0:
                zaxis *= -1

            quat = _rotation_difference_v3v3(zaxis, _array([0., 0., 1.]))
            # rotvecs = [_mul_qt_v3(quat, v)[:2] for v in vecs]
            result = pool.starmap_async(_mul_qt_v3, zip(_repeat(quat), vecs))
            rotvecs = [v[:2] for v in result.get()]
            indices_2d = indices

    else:  # 3d
        indices_set = set(_chain.from_iterable(indices))
        zaxis = None
        dist = 0.0
        # find the face (plane) and vertex pair with the smallest distance
        for tri in indices:
            v1, v2, v3 = [vecs[i] for i in tri]
            normal = _normal_tri(v1, v2, v3)
            plane = _plane(v1, normal)
            d = 0.0
            for v4 in (vecs[i] for i in indices_set if i not in tri):
                f = abs(_distance_point_to_plane(v4, plane))
                d = max(f, d)
            if zaxis is None or d < dist:
                zaxis = -normal
                dist = d

        quat = _rotation_difference_v3v3(zaxis, _array([0., 0., 1.]))
        # rotvecs = [_np.resize(_mul_qt_v3(quat, v), 2) for v in vecs]
        # rotvecs = [_mul_qt_v3(quat, v)[:2] for v in vecs]
        result = pool.starmap_async(_mul_qt_v3, zip(_repeat(quat), vecs))
        rotvecs = [v[:2] for v in result.get()]
        indices_2d = _convex_hull_2d(rotvecs, eps)

    yaxis = _closest_axis_on_plane(rotvecs, indices_2d)
    yaxis = _mul_qt_v3(_invert_qt(quat), _np.append(yaxis, 0))

    xaxis = _cross_3d(yaxis, zaxis)
    xaxis = _normalized(xaxis)  # unnecessary?

    mat[:3, 0] = xaxis
    mat[:3, 1] = yaxis
    mat[:3, 2] = zaxis

    # compute the OBB size and centre
    imat = _np.linalg.inv(mat)
    rotvecs = _mul_mat_array(imat, vecs)
    min_vec = rotvecs.min(axis=0)
    max_vec = rotvecs.max(axis=0)
    bb_size = max_vec - min_vec
    loc = (min_vec + max_vec) / 2
    mat[:3, 3] = _mul_mat_vec(mat, loc)

    pool.close()
    pool.join()
    return mat, bb_size
Example no. 30
def _convex_hull_3d(vecs, eps=1e-6):
    """三次元の凸包を求める
    :param vecs: list of 3D array
    :type vecs: list | tuple | numpy.ndarray
    :param eps: 距離がこれ以下なら同一平面と見做す
    """

    n = len(vecs)
    if n == 0:
        return []
    elif n == 1:
        return [0]

    verts = [_Vert(i, v) for i, v in enumerate(vecs)]

    # find two vertices that are as far apart as possible
    # medium = _reduce(lambda a, b: a + b, vecs) / len(vecs)
    medium = _np.sum(vecs, axis=0) / len(vecs)

    # v1 = max(verts, key=lambda v: _norm(v.co - medium))
    # v2 = max(verts, key=lambda v: _norm(v.co - v1.co))
    v1 = verts[_norm(vecs - medium, axis=1).argmax()]
    v2 = verts[_norm(vecs - v1.co, axis=1).argmax()]
    line = v2.co - v1.co
    if _norm(line) <= eps:  # all vertices coincide
        return [0]
    if len(verts) == 2:
        return [v1.index, v2.index]

    # find a vertex with which to form a triangle
    # v3 = max(verts, key=lambda v: _norm(_cross(line, v.co - v1.co)))
    v3 = verts[_norm(_cross(line, vecs - v1.co), axis=1).argmax()]
    # NOTE:
    # np.cross(vec, mat)[0] == np.cross(vec, mat[0])
    # np.cross(mat, vec)[0] == np.cross(mat[0], vec)

    if _norm(_cross_3d(_normalized(line), v3.co - v1.co)) <= eps:
        # all vertices are collinear
        return [v1.index, v2.index]
    if len(verts) == 3:
        return [v1.index, v2.index, v3.index]

    verts.remove(v1)
    verts.remove(v2)
    verts.remove(v3)

    pool = _mp.Pool()

    # find a vertex with which to form a tetrahedron
    normal = _normal_tri(v1.co, v2.co, v3.co)
    plane = _plane(v1.co, normal)

    def key_func(v):
        return abs(_distance_point_to_plane(v.co4d, plane))

    v4 = max(verts, key=key_func)
    if key_func(v4) <= eps:
        # all vertices lie in a single plane
        quat = _rotation_difference_v3v3(normal, _array([0., 0., 1.]))
        # vecs_2d = [_np.resize(_mul_qt_v3(quat, v), 2) for v in vecs]
        # vecs_2d = [_mul_qt_v3(quat, v)[:2] for v in vecs]
        result = pool.starmap_async(_mul_qt_v3, zip(_repeat(quat), vecs))
        vecs_2d = [v[:2] for v in result.get()]
        return _convex_hull_2d(vecs_2d, eps)
    verts.remove(v4)

    # build the tetrahedron
    #       ^ normal
    #    v3 |
    #     / |\
    # v1 /____\ v2
    #    \    /
    #     \  /
    #     v4
    if _distance_point_to_plane(v4.co, v1.co, normal) < 0.0:
        faces = [
            _Face(v1, v2, v3),
            _Face(v1, v4, v2),
            _Face(v2, v4, v3),
            _Face(v3, v4, v1)
        ]
    else:
        faces = [
            _Face(v1, v3, v2),
            _Face(v1, v2, v4),
            _Face(v2, v3, v4),
            _Face(v3, v1, v4)
        ]

    # distribute the remaining vertices among the faces
    _divide_outer_verts(faces, verts, eps)

    # build edge_faces
    edge_faces = _defaultdict(list)
    for face in faces:
        for ekey in face.edge_keys:
            edge_faces[ekey].append(face)

    while True:
        added = False
        for i in range(len(faces)):
            try:
                face = faces[i]
            except:
                break
            if not face.outer_verts:
                continue

            v1 = max(face.outer_verts, key=lambda v: face.distance4d(v.co4d))

            if face.distance4d(v1.co4d) > eps:
                # add faces radiating out from v1 so the hull stays convex
                added = True

                # find the faces that become hidden and unnecessary
                remove_faces = set()
                _find_remove_faces_re(remove_faces, v1.co4d, face, edge_faces,
                                      eps)

                # remove remove_faces from the polyhedron, opening a hole
                for f in remove_faces:
                    for ekey in f.edge_keys:
                        edge_faces[ekey].remove(f)
                    faces.remove(f)

                # cover the hole with new faces
                new_faces = []
                ekey_count = _defaultdict(int)
                for f in remove_faces:
                    for ekey in f.edge_keys:
                        ekey_count[ekey] += 1
                for ekey, cnt in ekey_count.items():
                    if cnt != 1:
                        continue
                    linkface = edge_faces[ekey][0]
                    v2, v3 = ekey
                    if linkface.verts[linkface.verts.index(v2) - 1] != v3:
                        v2, v3 = v3, v2
                    new_face = _Face(v1, v2, v3)
                    for key in new_face.edge_keys:
                        edge_faces[key].append(new_face)
                    new_faces.append(new_face)
                faces.extend(new_faces)

                # redistribute the vertices
                outer_verts = _reduce(lambda a, b: a + b,
                                      (f.outer_verts for f in remove_faces))
                if v1 in outer_verts:
                    outer_verts.remove(v1)
                _divide_outer_verts(new_faces, outer_verts, eps)

            else:
                face.outer_verts = []

        if not added:
            break

    # do not forget this
    pool.close()
    pool.join()

    return [[v.index for v in f.verts] for f in faces]
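
The zip(_repeat(quat), vecs) pattern used with starmap_async pairs a constant first argument with every item; a minimal sketch with an illustrative worker function:

from itertools import repeat
from multiprocessing import Pool

def scale(factor, value):
    # stands in for _mul_qt_v3: same first argument, varying second argument
    return factor * value

if __name__ == '__main__':
    with Pool() as pool:
        result = pool.starmap_async(scale, zip(repeat(10), [1, 2, 3]))
        print(result.get())   # [10, 20, 30]
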
Example no. 31
def OBB(vecs, r_indices=None, eps=1e-6):
    """Convex hull を用いたOBBを返す。
    Z->Y->Xの順で長さが最少となる軸を求める。
    :param vecs: list of 2D/3D array
    :type vecs: list | tuple | numpy.ndarray
    :param r_indices: listを渡すとconvexhullの結果を格納する
    :type r_indices: None | list
    :param eps: 種々の計算の閾値
    :return:
        (matrix, obb_size)
        matrix:
            OBBの回転と中心座標を表す。OBBが二次元ベクトルの場合は3x3、
            三次元なら4x4。
        obb_size:
            OBBの各軸の長さ。OBBと同じ次元。
    :rtype: (numpy.ndarray, numpy.ndarray)
    """

    if len(vecs) == 0:
        return None, None

    # if not isinstance(vecs, _np.ndarray):
    #     vecs = _array(vecs, dtype=_np.float64)
    vecs = _np.asanyarray(vecs, dtype=_np.float64)

    # 2D ----------------------------------------------------------------------
    if len(vecs[0]) == 2:
        mat = _np.identity(3)
        bb_size = _array([0., 0.])

        indices = _convex_hull_2d(vecs, eps)
        if r_indices:
            r_indices[:] = indices

        if len(indices) == 1:
            mat[:2, 2] = vecs[0]
        elif len(indices) == 2:
            v1 = vecs[indices[0]]
            v2 = vecs[indices[1]]
            xaxis = _normalized(v2 - v1)
            angle = _math.atan2(xaxis[1], xaxis[0])
            s = _math.sin(angle)
            c = _math.cos(angle)
            mat2 = _array([[c, -s], [s, c]])
            mat[:2, 0] = mat2[:, 0]
            mat[:2, 1] = mat2[:, 1]
            mat[:2, 2] = (v1 + v2) / 2
            bb_size[0] = _norm(v2 - v1)
        else:
            yaxis = _closest_axis_on_plane(vecs, indices)
            angle = _math.atan2(yaxis[1], yaxis[0]) - _math.pi / 2  # X axis
            s = _math.sin(angle)
            c = _math.cos(angle)
            mat2 = _array([[c, -s], [s, c]])
            s = _math.sin(-angle)
            c = _math.cos(-angle)
            imat2 = _array([[c, -s], [s, c]])

            rotvecs = _mul_mat_array(imat2, vecs)
            min_vec = rotvecs.min(axis=0)
            max_vec = rotvecs.max(axis=0)
            bb_size = max_vec - min_vec
            loc = (min_vec + max_vec) / 2

            mat[:2, 0] = mat2[:, 0]
            mat[:2, 1] = mat2[:, 1]
            mat[:2, 2] = _mul_mat_vec(mat2, loc)
        return mat, bb_size

    # 3D ----------------------------------------------------------------------
    pool = _mp.Pool()

    mat = _np.identity(4)
    bb_size = _array([0., 0., 0.])

    indices = _convex_hull_3d(vecs, eps)

    if r_indices:
        r_indices[:] = indices

    if isinstance(indices[0], int):  # 2d
        if len(indices) == 1:
            mat[:3, 3] = vecs[0]
            return mat, bb_size

        elif len(indices) == 2:
            # collinear
            v1 = vecs[indices[0]]
            v2 = vecs[indices[1]]
            xaxis = _normalized(v2 - v1)
            quat = _rotation_difference_v3v3(_array([1., 0., 0.]), xaxis)
            mat = _np.identity(4)
            mat[:3, :3] = _quat_to_mat3(quat)
            mat[:3, 3] = (v1 + v2) / 2
            bb_size[0] = _norm(v2 - v1)
            return mat, bb_size

        else:
            # coplanar
            medium = _np.sum(vecs, axis=0) / len(vecs)
            v1 = vecs[_norm(vecs - medium, axis=1).argmax()]
            v2 = vecs[_norm(vecs - v1, axis=1).argmax()]
            line = v2 - v1
            v3 = vecs[_norm(_cross(line, vecs - v1), axis=1).argmax()]

            zaxis = _normal_tri(v1, v2, v3)
            if zaxis[2] < 0.0:
                zaxis *= -1

            quat = _rotation_difference_v3v3(zaxis, _array([0., 0., 1.]))
            # rotvecs = [_mul_qt_v3(quat, v)[:2] for v in vecs]
            result = pool.starmap_async(_mul_qt_v3, zip(_repeat(quat), vecs))
            rotvecs = [v[:2] for v in result.get()]
            indices_2d = indices

    else:  # 3d
        indices_set = set(_chain.from_iterable(indices))
        zaxis = None
        dist = 0.0
        # find the face (plane) and vertex pair with the smallest distance
        for tri in indices:
            v1, v2, v3 = [vecs[i] for i in tri]
            normal = _normal_tri(v1, v2, v3)
            plane = _plane(v1, normal)
            d = 0.0
            for v4 in (vecs[i] for i in indices_set if i not in tri):
                f = abs(_distance_point_to_plane(v4, plane))
                d = max(f, d)
            if zaxis is None or d < dist:
                zaxis = -normal
                dist = d

        quat = _rotation_difference_v3v3(zaxis, _array([0., 0., 1.]))
        # rotvecs = [_np.resize(_mul_qt_v3(quat, v), 2) for v in vecs]
        # rotvecs = [_mul_qt_v3(quat, v)[:2] for v in vecs]
        result = pool.starmap_async(_mul_qt_v3, zip(_repeat(quat), vecs))
        rotvecs = [v[:2] for v in result.get()]
        indices_2d = _convex_hull_2d(rotvecs, eps)


    yaxis = _closest_axis_on_plane(rotvecs, indices_2d)
    yaxis = _mul_qt_v3(_invert_qt(quat), _np.append(yaxis, 0))

    xaxis = _cross_3d(yaxis, zaxis)
    xaxis = _normalized(xaxis)  # unnecessary?

    mat[:3, 0] = xaxis
    mat[:3, 1] = yaxis
    mat[:3, 2] = zaxis

    # compute the OBB size and centre
    imat = _np.linalg.inv(mat)
    rotvecs = _mul_mat_array(imat, vecs)
    min_vec = rotvecs.min(axis=0)
    max_vec = rotvecs.max(axis=0)
    bb_size = max_vec - min_vec
    loc = (min_vec + max_vec) / 2
    mat[:3, 3] = _mul_mat_vec(mat, loc)

    pool.close()
    pool.join()
    return mat, bb_size
Example no. 32
def _convex_hull_3d(vecs, eps=1e-6):
    """三次元の凸包を求める
    :param vecs: list of 3D array
    :type vecs: list | tuple | numpy.ndarray
    :param eps: 距離がこれ以下なら同一平面と見做す
    """

    n = len(vecs)
    if n == 0:
        return []
    elif n == 1:
        return [0]

    verts = [_Vert(i, v) for i, v in enumerate(vecs)]

    # find two vertices that are as far apart as possible
    # medium = _reduce(lambda a, b: a + b, vecs) / len(vecs)
    medium = _np.sum(vecs, axis=0) / len(vecs)

    # v1 = max(verts, key=lambda v: _norm(v.co - medium))
    # v2 = max(verts, key=lambda v: _norm(v.co - v1.co))
    v1 = verts[_norm(vecs - medium, axis=1).argmax()]
    v2 = verts[_norm(vecs - v1.co, axis=1).argmax()]
    line = v2.co - v1.co
    if _norm(line) <= eps:  # all vertices coincide
        return [0]
    if len(verts) == 2:
        return [v1.index, v2.index]

    # find a vertex with which to form a triangle
    # v3 = max(verts, key=lambda v: _norm(_cross(line, v.co - v1.co)))
    v3 = verts[_norm(_cross(line, vecs - v1.co), axis=1).argmax()]
    # NOTE:
    # np.cross(vec, mat)[0] == np.cross(vec, mat[0])
    # np.cross(mat, vec)[0] == np.cross(mat[0], vec)

    if _norm(_cross_3d(_normalized(line), v3.co - v1.co)) <= eps:
        # all vertices are collinear
        return [v1.index, v2.index]
    if len(verts) == 3:
        return [v1.index, v2.index, v3.index]

    verts.remove(v1)
    verts.remove(v2)
    verts.remove(v3)

    pool = _mp.Pool()

    # find a vertex with which to form a tetrahedron
    normal = _normal_tri(v1.co, v2.co, v3.co)
    plane = _plane(v1.co, normal)
    def key_func(v):
        return abs(_distance_point_to_plane(v.co4d, plane))
    v4 = max(verts, key=key_func)
    if key_func(v4) <= eps:
        # all vertices lie in a single plane
        quat = _rotation_difference_v3v3(normal, _array([0., 0., 1.]))
        # vecs_2d = [_np.resize(_mul_qt_v3(quat, v), 2) for v in vecs]
        # vecs_2d = [_mul_qt_v3(quat, v)[:2] for v in vecs]
        result = pool.starmap_async(_mul_qt_v3, zip(_repeat(quat), vecs))
        vecs_2d = [v[:2] for v in result.get()]
        return _convex_hull_2d(vecs_2d, eps)
    verts.remove(v4)

    # build the tetrahedron
    #       ^ normal
    #    v3 |
    #     / |\
    # v1 /____\ v2
    #    \    /
    #     \  /
    #     v4
    if _distance_point_to_plane(v4.co, v1.co, normal) < 0.0:
        faces = [_Face(v1, v2, v3),
                 _Face(v1, v4, v2), _Face(v2, v4, v3), _Face(v3, v4, v1)]
    else:
        faces = [_Face(v1, v3, v2),
                 _Face(v1, v2, v4), _Face(v2, v3, v4), _Face(v3, v1, v4)]

    # distribute the remaining vertices among the faces
    _divide_outer_verts(faces, verts, eps)

    # build edge_faces
    edge_faces = _defaultdict(list)
    for face in faces:
        for ekey in face.edge_keys:
            edge_faces[ekey].append(face)

    while True:
        added = False
        for i in range(len(faces)):
            try:
                face = faces[i]
            except:
                break
            if not face.outer_verts:
                continue

            v1 = max(face.outer_verts, key=lambda v: face.distance4d(v.co4d))

            if face.distance4d(v1.co4d) > eps:
                # add faces radiating out from v1 so the hull stays convex
                added = True

                # find the faces that become hidden and unnecessary
                remove_faces = set()
                _find_remove_faces_re(remove_faces, v1.co4d, face, edge_faces,
                                      eps)

                # remove remove_faces from the polyhedron, opening a hole
                for f in remove_faces:
                    for ekey in f.edge_keys:
                        edge_faces[ekey].remove(f)
                    faces.remove(f)

                # cover the hole with new faces
                new_faces = []
                ekey_count = _defaultdict(int)
                for f in remove_faces:
                    for ekey in f.edge_keys:
                        ekey_count[ekey] += 1
                for ekey, cnt in ekey_count.items():
                    if cnt != 1:
                        continue
                    linkface = edge_faces[ekey][0]
                    v2, v3 = ekey
                    if linkface.verts[linkface.verts.index(v2) - 1] != v3:
                        v2, v3 = v3, v2
                    new_face = _Face(v1, v2, v3)
                    for key in new_face.edge_keys:
                        edge_faces[key].append(new_face)
                    new_faces.append(new_face)
                faces.extend(new_faces)

                # redistribute the vertices
                outer_verts = _reduce(lambda a, b: a + b,
                                      (f.outer_verts for f in remove_faces))
                if v1 in outer_verts:
                    outer_verts.remove(v1)
                _divide_outer_verts(new_faces, outer_verts, eps)

            else:
                face.outer_verts = []

        if not added:
            break

    # do not forget this
    pool.close()
    pool.join()

    return [[v.index for v in f.verts] for f in faces]