Example #1
def crt(R, N, view=0):
    ''' Chinese remainder theorem
        Input: R = Vector of remainders
               N = Vector of moduli (assumed pairwise coprime)
        Output: x = Solution of the system
    '''
    n = _reduce(_mul, N)

    # Integer division keeps the partial products exact for large moduli.
    M = [n // t for t in N]
    L = [_eea(N[i], M[i])[1] for i in range(len(R))]
    C = [R[i] * M[i] * L[i] for i in range(len(R))]

    x = _reduce(_add, C) % n
    
    if (view == 1):
        print('The modulo is: {0:10}'.format(n))
        print()
        print('{0:3} {1:15} {2:15} {3:15} {4:15}'.format('i'.rjust(3),'r[i]'.rjust(15),'m[i]'.rjust(15),'l[i]'.rjust(15),'x[i]'.rjust(15)))
        print('---------------------------------------------------------------------')
        for i in range(0, len(R)):
              print('{0:3} {1:15} {2:15} {3:15} {4:15}'.format(str((i + 1)).rjust(3),str(R[i]).rjust(15),str(M[i]).rjust(15),str(L[i]).rjust(15),str(C[i]).rjust(15)))

        print('x = sigma(x[i]) mod {0} = '.format(n))

        sigma = ''
        for i in range(0, len(C) - 1):
            sigma = sigma + str(C[i]) + ' + '

        sigma = sigma + str(C[len(C) - 1]) + ' = ' + str(x)
        print(sigma)
            
    return x
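A minimal usage sketch, hedged on the assumption that _eea(N[i], M[i]) returns a tuple whose second entry is the modular inverse of M[i] modulo N[i]:

# x = 2 (mod 3), x = 3 (mod 5), x = 2 (mod 7) has the unique solution 23 mod 105.
assert crt([2, 3, 2], [3, 5, 7]) == 23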
Example #2
    def product(self, indices):
        """
        Returns the index/label corresponding to the product of a list
        or tuple of indices/labels.

        Parameters
        ----------
        indices : iterable
            Specifies the sequence of group elements to include in the matrix 
            product.  If `indices` contains integers, they are interpreted as
            group element indices, and an integer is returned.  Otherwise,
            `indices` is assumed to contain group element labels, and a label
            is returned.
        
        Returns
        -------
        int or str
            If `indices` contains integers, returns the resulting element's
            index.  Otherwise returns the resulting element's label.
        """
        if len(indices) == 0: return None
        if is_integer(indices[0]):
            return _reduce(lambda i, j: self.product_table[i, j], indices)
        else:
            indices = [self.label_indices[i] for i in indices]
            fi = _reduce(lambda i, j: self.product_table[i, j], indices)
            return self.labels[fi]
Example #3
def scatter_nd(indices, updates, shape, reduction='sum', dev_str=None):
    if dev_str is None:
        dev_str = _dev_str_callable(updates)
    shape = list(shape)
    dtype = updates.dtype
    if reduction == 'sum':
        return _tf.scatter_nd(indices, updates, shape)
    elif reduction == 'min':
        func = _tf.compat.v1.scatter_min
        initial_val = _tf.cast(_tf.constant(2**31 - 1), dtype)
    elif reduction == 'max':
        func = _tf.compat.v1.scatter_max
        initial_val = _tf.cast(_tf.constant(-(2**31 - 1)), dtype)
    else:
        raise ValueError(
            'reduction is {}, but it must be one of "sum", "min" or "max"'.
            format(reduction))
    indices_shape = indices.shape
    num_index_dims = indices_shape[-1]
    result_dim_sizes_list = [
        _reduce(_mul, shape[i + 1:], 1) for i in range(len(shape) - 1)
    ] + [1]
    result_dim_sizes = _tf.constant(result_dim_sizes_list)
    implicit_indices_factor = result_dim_sizes[num_index_dims - 1]
    flat_result_size = _reduce(_mul, shape, 1)
    global TF_SCATTER_VAR
    if flat_result_size not in TF_SCATTER_VAR:
        TF_SCATTER_VAR[flat_result_size] = {
            dtype:
            _tf.Variable(_tf.ones(flat_result_size, dtype=dtype) * initial_val,
                         trainable=False)
        }
    elif dtype not in TF_SCATTER_VAR[flat_result_size]:
        TF_SCATTER_VAR[flat_result_size][dtype] = _tf.Variable(
            _tf.ones(flat_result_size, dtype=dtype) * initial_val,
            trainable=False)
    else:
        TF_SCATTER_VAR[flat_result_size][dtype].assign(
            _tf.ones(flat_result_size, dtype=dtype) * initial_val)
    flat_updates = _tf.reshape(updates, (-1, ))
    new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
    indices_scales = _tf.reshape(result_dim_sizes[0:num_index_dims], new_shape)
    indices_for_flat_tiled = _tf.tile(
        _tf.reshape(
            _tf.reduce_sum(indices * indices_scales, -1, keepdims=True),
            (-1, 1)), [1, implicit_indices_factor])
    implicit_indices = _tf.tile(
        _tf.expand_dims(_tf.range(implicit_indices_factor), 0),
        _tf.stack((_tf.shape(indices_for_flat_tiled)[0], _tf.constant(1))))
    indices_for_flat = indices_for_flat_tiled + implicit_indices
    flat_indices_for_flat = _tf.reshape(indices_for_flat, (-1, ))
    flat_scatter = _tf.convert_to_tensor(
        func(TF_SCATTER_VAR[flat_result_size][dtype], flat_indices_for_flat,
             flat_updates))
    flat_scatter = _tf.where(flat_scatter == initial_val,
                             _tf.zeros(flat_result_size, dtype=updates.dtype),
                             flat_scatter)
    with _tf.device('/' + dev_str.upper()):
        res = _tf.reshape(flat_scatter, list(shape))
        return res
Example #4
def test_unstack():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            # mxsymbolic split returns either list or tensor depending on number of splits
            continue
        x = np.swapaxes(np.array([[0.]]), 0, 0)
        true = [np.array(item) for item in x.tolist()]
        pred = call(ivy_gen.unstack,
                    ivy_gen.array([[0.]], f=lib),
                    0,
                    num_outputs=1)
        assert _reduce(
            _mul,
            [np.array_equal(pred_, true_)
             for pred_, true_ in zip(pred, true)], 1) == 1
        x = np.swapaxes(np.array([[[0.]]]), 0, 0)
        true = [np.array(item) for item in x.tolist()]
        pred = call(ivy_gen.unstack,
                    ivy_gen.array([[[0.]]], f=lib),
                    0,
                    num_outputs=1)
        assert _reduce(
            _mul,
            [np.array_equal(pred_, true_)
             for pred_, true_ in zip(pred, true)], 1) == 1
Example #5
def _recursive_crank(p, t, n, known=None, style="standard"):
    from sage.all import Partitions
    # Use None instead of a mutable default list, which would persist across calls.
    if not known:
        known = [
            _Igusa_braid_table(p, t, k, style=style)
            for k in range(_TABLE_CUTOFF + 1)
        ]
    k = len(known) + 1
    Zk = 0
    for L in Partitions(k):
        if len(L) > 1:
            if style == "reduced":
                L_factors = [_P(L), 1, t**(_binom_sum(L)), _factorial(len(L))]
            else:
                L_factors = [
                    _P(L), p**(1 - _reduce(lambda x, y: x + y - 1, L)),
                    t**(_binom_sum(L)),
                    _Poincare(L)(Y=-p**-1)
                ]
            lower_integrals = list(map(lambda z: known[z - 1], list(L)))
            L_term = _reduce(lambda x, y: x * y, L_factors + lower_integrals,
                             1)
            Zk += L_term
    if style == "reduced":
        Zk = Zk / (1 - t**(_binomial(k, 2)))
    else:
        Zk = Zk / (1 - p**(-k + 1) * t**(_binomial(k, 2)))
    known += [Zk]
    if k - 1 < n:
        return _recursive_crank(p, t, n, known=known, style=style)
    else:
        return known[n]
Example #6
def scatter_nd(indices, updates, shape, reduction='sum', dev_str=None):
    if dev_str is None:
        dev_str = _callable_dev_str(updates)
    shape = list(shape)
    dtype = updates.dtype
    indices_shape = indices.shape
    num_index_dims = indices_shape[-1]
    result_dim_sizes_list = [
        _reduce(mul, shape[i + 1:], 1) for i in range(len(shape) - 1)
    ] + [1]
    result_dim_sizes = torch.tensor(result_dim_sizes_list).to(
        str_to_dev(dev_str))
    implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
    flat_result_size = _reduce(mul, shape, 1)
    if reduction == 'sum':
        initial_val = torch.tensor(0).type(dtype).to(str_to_dev(dev_str))
    elif reduction == 'min':
        initial_val = torch.tensor(1e12).type(dtype).to(str_to_dev(dev_str))
    elif reduction == 'max':
        initial_val = torch.tensor(-1e12).type(dtype).to(str_to_dev(dev_str))
    else:
        raise ValueError(
            'reduction is {}, but it must be one of "sum", "min" or "max"'.
            format(reduction))
    flat_output = torch.ones(flat_result_size, dtype=dtype).to(
        str_to_dev(dev_str)) * initial_val
    flat_updates = torch.reshape(updates, (-1, ))
    new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
    indices_scales = torch.reshape(result_dim_sizes[0:num_index_dims],
                                   new_shape)
    indices_for_flat_tiled = torch.reshape(
        torch.sum(indices * indices_scales, -1, keepdim=True),
        (-1, 1)).repeat(*[1, implicit_indices_factor])
    implicit_indices = torch.unsqueeze(
        torch.arange(implicit_indices_factor).to(str_to_dev(dev_str)),
        0).repeat(*[indices_for_flat_tiled.shape[0], 1])
    indices_for_flat = indices_for_flat_tiled + implicit_indices
    flat_indices_for_flat = torch.reshape(indices_for_flat,
                                          (-1, )).type(torch.long)
    global torch_scatter
    if torch_scatter is None:
        try:
            import torch_scatter as torch_scatter
        except ImportError:
            raise Exception(
                'Unable to import torch_scatter, verify this is correctly installed.'
            )
    flat_scatter = torch_scatter.scatter(flat_updates,
                                         flat_indices_for_flat,
                                         out=flat_output,
                                         reduce=reduction)
    # noinspection PyTypeChecker
    flat_scatter = torch.where(
        flat_scatter == initial_val,
        torch.zeros(flat_result_size,
                    dtype=updates.dtype).to(str_to_dev(dev_str)), flat_scatter)
    res = torch.reshape(flat_scatter, list(shape))
    return res
Example #7
def _top_zeta_function_mul(L, DB=True, verbose=_print, atom=False):
    from sage.all import var
    from .LatticeFlats import _subposet

    P = L.poset
    C = 1 * L.poset.has_top()

    s_name = lambda x: var("s" + str(x))
    if atom:
        atoms = P.upper_covers(P.bottom())

        def s_data(x):
            under_poset = _subposet(P, x, lambda z: P.lower_covers(z))
            elts = filter(lambda y: y in under_poset, atoms)
            ts = map(s_name, elts)
            return _reduce(lambda x, y: x + y, ts, 0)
    else:

        def s_data(x):
            under_poset = _subposet(P, x, lambda z: P.lower_covers(z))
            elts = list(under_poset._elements)
            elts.remove(P.bottom())
            ts = map(s_name, elts)
            return _reduce(lambda x, y: x + y, ts, 0)

    S = {x: s_data(x) for x in P._elements}

    # Base cases for recursion.
    add_em = lambda x, y: x + y
    if P.has_top() and P.rank() == 2:
        atms = P.upper_covers(P.bottom())
        m = len(atms)
        elt_dat = lambda x: 1 / (1 + S[x])
        return _reduce(add_em, map(elt_dat, atms), 2 - m) / (2 + S[P.top()])
    if P.rank() == 1:
        atms = P.upper_covers(P.bottom())
        m = len(atms)
        elt_dat = lambda x: 1 / (1 + S[x])
        return _reduce(add_em, map(elt_dat, atms), 1 - m)

    poincare = _Poincare_polynomial(L)
    Y = poincare(P.bottom()).variables()[0]
    pi_circ = lambda x: (poincare(x) /
                         (1 + Y)**C).factor().simplify().subs({Y: -1})
    x_factor = lambda x: pi_circ(x)
    prop_elts = L.proper_part_poset()._elements
    factors = map(lambda x: x_factor(x), prop_elts)
    integrals = map(
        lambda x: _top_zeta_function_mul(L.subarrangement(x), DB=DB, atom=atom
                                         ), prop_elts)
    pi = pi_circ(P.bottom())
    zeta = _reduce(lambda x, y: x + y[0] * y[1], zip(factors, integrals),
                   0) + pi
    if P.has_top():
        zeta = zeta / (P.rank() + S[P.top()])

    return zeta
Example #8
    def _combine_status(self, values):
        status_or = _reduce(_or_, values)
        status_and = _reduce(_and_, values)
        alarm = self.Alarm.NO
        severity = self.Severity.NO
        if status_or != status_and:
            alarm = self.Alarm.COMM
            severity = self.Severity.INVALID
        return {'value': status_or, 'alarm': alarm, 'severity': severity}
Example #9
def bilinear_resample(x, warp):
    batch_shape = x.shape[:-3]
    input_image_dims = x.shape[-3:-1]
    batch_shape = list(batch_shape)
    input_image_dims = list(input_image_dims)
    num_feats = x.shape[-1]
    # image statistics
    height, width = input_image_dims
    max_x = width - 1
    max_y = height - 1
    idx_size = _reduce(_mul, warp.shape[-3:-1], 1)
    batch_shape_flat = _reduce(_mul, batch_shape, 1)
    # B
    batch_offsets = _jnp.arange(batch_shape_flat) * idx_size
    # B x (HxW)
    base_grid = _jnp.tile(_jnp.expand_dims(batch_offsets, 1), [1, idx_size])
    # (BxHxW)
    base = _jnp.reshape(base_grid, [-1])
    # (BxHxW) x D
    data_flat = _jnp.reshape(x, [batch_shape_flat * height * width, -1])
    # (BxHxW) x 2
    warp_flat = _jnp.reshape(warp, [-1, 2])
    warp_floored = (_jnp.floor(warp_flat)).astype(_jnp.int32)
    bilinear_weights = warp_flat - _jnp.floor(warp_flat)
    # (BxHxW)
    x0 = warp_floored[:, 0]
    x1 = x0 + 1
    y0 = warp_floored[:, 1]
    y1 = y0 + 1
    x0 = _jnp.clip(x0, 0, max_x)
    x1 = _jnp.clip(x1, 0, max_x)
    y0 = _jnp.clip(y0, 0, max_y)
    y1 = _jnp.clip(y1, 0, max_y)
    base_y0 = base + y0 * width
    base_y1 = base + y1 * width
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1
    # (BxHxW) x D
    Ia = _jnp.take(data_flat, idx_a, axis=0)
    Ib = _jnp.take(data_flat, idx_b, axis=0)
    Ic = _jnp.take(data_flat, idx_c, axis=0)
    Id = _jnp.take(data_flat, idx_d, axis=0)
    # (BxHxW)
    xw = bilinear_weights[:, 0]
    yw = bilinear_weights[:, 1]
    # (BxHxW) x 1
    wa = _jnp.expand_dims((1 - xw) * (1 - yw), 1)
    wb = _jnp.expand_dims((1 - xw) * yw, 1)
    wc = _jnp.expand_dims(xw * (1 - yw), 1)
    wd = _jnp.expand_dims(xw * yw, 1)
    # (BxHxW) x D
    resampled_flat = wa * Ia + wb * Ib + wc * Ic + wd * Id
    # B x H x W x D
    return _jnp.reshape(resampled_flat, batch_shape + [-1, num_feats])
Example #10
def test_svd():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            # mxnet symbolic does not support svd
            continue
        pred = call(ivy_linalg.svd, ivy_gen.array([[[1., 0.], [0., 1.]]], f=lib))
        true = np.linalg.svd(np.array([[[1., 0.], [0., 1.]]]))
        assert _reduce(_mul, [np.array_equal(pred_, true_) for pred_, true_ in zip(pred, true)], 1) == 1
        pred = call(ivy_linalg.svd, ivy_gen.array([[[[1., 0.], [0., 1.]]]], f=lib))
        true = np.linalg.svd(np.array([[[[1., 0.], [0., 1.]]]]))
        assert _reduce(_mul, [np.array_equal(pred_, true_) for pred_, true_ in zip(pred, true)], 1) == 1
Example #11
def _universal(L, analytic=False, atom=False):
    from sage.all import var
    from .LatticeFlats import _subposet

    # Set up the potential substitutions for T -- as defined in Maglione--Voll.
    if analytic:
        q = var('q')
        Y = -q**(-1)
        P = L.poset
        t_name = lambda x: var("t" + str(x))
        if atom:
            atoms = P.upper_covers(P.bottom())

            def T_data(x):
                under_poset = _subposet(P, x, lambda z: P.lower_covers(z))
                elts = filter(lambda y: y in under_poset, atoms)
                ts = map(t_name, elts)
                return _reduce(lambda x, y: x * y, ts, q**(-P.rank(x)))
        else:

            def T_data(x):
                under_poset = _subposet(P, x, lambda z: P.lower_covers(z))
                elts = list(under_poset._elements)
                elts.remove(P.bottom())
                ts = map(t_name, elts)
                return _reduce(lambda x, y: x * y, ts, q**(-P.rank(x)))
    else:
        T_data = lambda x: var("T" + str(x))
        Y = var('Y')

    T = {x: T_data(x) for x in L.poset._elements}

    # Base cases for recursion.
    if L.poset.has_top() and L.poset.rank() == 2:
        elts = L.proper_part_poset()._elements
        merge = lambda x, y: x + (1 + Y)**2 * T[y] / (1 - T[y])
        one = L.poset.top()
        return _reduce(merge, elts,
                       (1 + Y) * (1 + (len(elts) - 1) * Y)) / (1 - T[one])
    if L.poset.rank() == 1:
        # list.remove returns None, so drop the bottom element in a separate step.
        elts = list(L.poset._elements)
        elts.remove(L.poset.bottom())
        merge = lambda x, y: x + (1 + Y) * T[y] / (1 - T[y])
        return _reduce(merge, elts, 1 + len(elts) * Y)

    P = L.proper_part_poset()
    poincare = _Poincare_polynomial(L, sub=Y)
    recurse = lambda M: _universal(M, analytic=analytic, atom=atom)
    num_dat = lambda x: poincare(x) * T[x] * recurse(L.subarrangement(x))
    factors = map(num_dat, P._elements)
    HP = _reduce(lambda x, y: x + y, factors, poincare(L.poset.bottom()))
    if L.poset.has_top():
        HP = HP / (1 - T[L.poset.top()])
    return HP
Example #12
def reduce(fun, init, values=None):
    """
    A function that reduces a list to a single value using a given function.

    Complexity: O(n*k) where k is the complexity of the given function
    params:
        fun: the function that should be applied
        values: the list of values we should reduce
    returns:
        the reduced value
    """
    if values is None:
        return _reduce(fun, init)
    else:
        return _reduce(fun, values, init)
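A short usage sketch of the wrapper above:

assert reduce(lambda x, y: x + y, 0, [1, 2, 3]) == 6  # explicit initial value
assert reduce(lambda x, y: x + y, [1, 2, 3]) == 6     # init doubles as the iterable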
Example #13
def _P(L):
    from sage.all import Set

    # Compute binom(n, p_1)*binom(n - p_1, p_2)*binom(n - p_1 - p_2, p_3)*...
    def binom(n, P):
        if len(P) == 1:
            return _binomial(n, P[0])
        else:
            return _binomial(n, P[0]) * binom(n - P[0], P[1:])

    n = _reduce(lambda x, y: x + y, L)
    count = lambda n: len(list(filter(lambda x: x == n, L)))
    S = list(Set(list(L)))
    d = _reduce(lambda x, y: x * y, map(lambda z: _factorial(count(z)), S))
    return binom(n, L) // d
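A worked instance, hedged since _binomial and _factorial come from the surrounding module (presumably Sage's binomial and factorial):

# _P([2, 1]): n = 3, binom(3, 2) * binom(1, 1) = 3, and d = 1! * 1! = 1,
# so _P([2, 1]) == 3, the number of partitions of a 3-element set into
# blocks of sizes 2 and 1.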
Example #14
def provisionsets_combinations(provisionsets, choose0=True):

    """
    The authentication security provision sets that are yielded by
    combinations of given sets.

    :param provisionsets:
        A set of security provision sets.
    :type provisionsets: ~[:class:`ProvisionSetABC`]

    :param bool choose0:
        Whether to include the empty choice.  If true, then the empty set of
        security provisions is always included in the result.  Otherwise, it
        is included only if *provisionsets* contains it.

    :rtype: :class:`ProvisionSetABC`

    """

    if choose0:
        yield next(iter(provisionsets)).__class__()

    for combination \
            in _chain(*(_combinations(provisionsets, nchoices)
                        for nchoices in range(1, len(provisionsets) + 1))):
        yield _reduce(_or, combination)
Example #15
def gather_nd(params, indices, dev_str=None):
    if dev_str is None:
        dev_str = _callable_dev_str(params)
    indices_shape = indices.shape
    params_shape = params.shape
    num_index_dims = indices_shape[-1]
    res_dim_sizes_list = [
        _reduce(_mul, params_shape[i + 1:], 1)
        for i in range(len(params_shape) - 1)
    ] + [1]
    result_dim_sizes = _jnp.array(res_dim_sizes_list)
    implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
    flat_params = _jnp.reshape(params, (-1, ))
    new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
    indices_scales = _jnp.reshape(result_dim_sizes[0:num_index_dims],
                                  new_shape)
    indices_for_flat_tiled = _jnp.tile(
        _jnp.reshape(_jnp.sum(indices * indices_scales, -1, keepdims=True),
                     (-1, 1)), (1, implicit_indices_factor))
    implicit_indices = _jnp.tile(
        _jnp.expand_dims(_jnp.arange(implicit_indices_factor), 0),
        (indices_for_flat_tiled.shape[0], 1))
    indices_for_flat = indices_for_flat_tiled + implicit_indices
    flat_indices_for_flat = _jnp.reshape(indices_for_flat,
                                         (-1, )).astype(_jnp.int32)
    flat_gather = _jnp.take(flat_params, flat_indices_for_flat, 0)
    new_shape = list(indices_shape[:-1]) + list(params_shape[num_index_dims:])
    ret = _jnp.reshape(flat_gather, new_shape)
    return to_dev(ret, dev_str)
Example #16
    def _toposort(cls, data: Dict[str, Set[str]]) -> Iterable[Set[str]]:
        # Copied from https://bitbucket.org/ericvsmith/toposort/src/25b5894c4229cb888f77cf0c077c05e2464446ac/toposort.py?at=default
        # -> Apache 2.0 license, Copyright 2014 True Blade Systems, Inc.

        # Special case empty input.
        if len(data) == 0:
            return

        # Copy the input so as to leave it unmodified.
        data = data.copy()

        # Ignore self dependencies.
        for k, v in data.items():
            v.discard(k)
        # Find all items that don't depend on anything.
        extra_items_in_deps = _reduce(set.union, data.values()) - set(
            data.keys())
        # Add empty dependences where needed.
        data.update({item: set() for item in extra_items_in_deps})
        while True:
            ordered = {item for item, dep in data.items() if len(dep) == 0}
            if not ordered:
                break
            yield ordered
            data = {
                item: (dep - ordered)
                for item, dep in data.items() if item not in ordered
            }
        if len(data) != 0:
            yield cls._modified_dfs(data)
Example #17
def Roll(roll_description):
    roll = _parse_roll_description(roll_description)
    output = list()
    total = 0
    for _ in range(roll.count):
        result = _choice(_simple_side_generation(roll.sides))
        output.append(result)
    if roll.keep_best:
        output.sort()
        output.reverse()
        output = output[:roll.keep_best]
    # Materialize the map so the reduction below does not exhaust the returned iterator.
    output = list(map(lambda x: (x, roll.per_die), output))
    total = _reduce(_add, _reduce(_add, output))
    total += roll.post_roll
    total = total if total >= 0 else 0
    return (total, output)
Example #18
def p005(lower_bound, upper_bound):
	factors = _Counter()
	for i in range(lower_bound, upper_bound + 1):
		for multiple, count in _Counter(factor(i)).items():
			if count > factors.get(multiple, 0):
				factors[multiple] = count
	return _reduce(_mul, (k ** v for k, v in factors.items()))
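A usage sketch, hedged on the assumption that factor(i) yields the prime factors of i with multiplicity:

# p005(1, 10) -> 2520, the smallest number evenly divisible by all of 1..10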
Example #19
def _top_zeta_function_uni(L, DB=True, verbose=_print):
    from sage.all import var

    P = L.poset
    s = var('s')
    C = 1 * L.poset.has_top()

    # Base cases for recursion.
    if P.has_top() and P.rank() == 2:
        m = len(P) - 2
        return (2 + (2 - m) * s) / ((2 + m * s) * (1 + s))
    if P.rank() == 1:
        m = len(P) - 1
        return (1 + (1 - m) * s) / (1 + s)

    poincare = _Poincare_polynomial(L)
    Y = poincare(P.bottom()).variables()[0]
    pi_circ = lambda x: (poincare(x) /
                         (1 + Y)**C).factor().simplify().subs({Y: -1})
    eq_elt_data = L._combinatorial_eq_elts()
    factors = map(lambda x: x[1] * pi_circ(x[0]), eq_elt_data)
    integrals = map(lambda x: _top_zeta_function_uni(x[2], DB=DB), eq_elt_data)
    pi = pi_circ(P.bottom())
    zeta = _reduce(lambda x, y: x + y[0] * y[1], zip(factors, integrals),
                   0) + pi
    if C == 1:
        zeta = zeta / (P.rank() + len(L.atoms()) * s)

    return zeta
Example #20
def toposort(data):
    """
    Dependencies are expressed as a dictionary whose keys are items
    and whose values are a set of dependent items. Output is a list of
    sets in topological order. The first set consists of items with no
    dependences, each subsequent set consists of items that depend upon
    items in the preceding sets.
    """

    # Special case empty input.
    if len(data) == 0:
        return

    # Copy the input so as to leave it unmodified.
    data = data.copy()

    # Ignore self dependencies.
    for k, v in data.items():
        v.discard(k)
    # Find all items that don't depend on anything.
    extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())
    # Add empty dependences where needed.
    data.update(dict(([item, set()] for item in extra_items_in_deps)))
    while True:
        ordered = set(item for item, dep in data.items() if len(dep) == 0)
        if not ordered:
            break
        yield ordered
        data = dict(([item, (dep - ordered)]
                for item, dep in data.items()
                    if item not in ordered))
    if len(data) != 0:
        raise ValueError('Cyclic dependencies exist among these items: %s' %
            ', '.join(repr(x) for x in data.items()))
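A short usage sketch:

# 'a' depends on nothing, 'b' depends on 'a', 'c' depends on 'a' and 'b'.
deps = {'b': {'a'}, 'c': {'a', 'b'}}
assert list(toposort(deps)) == [{'a'}, {'b'}, {'c'}]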
Example #21
def get_divisors(n, with_negative=False):
    """Return all the divisors of a number.

    Including the number itself.
    """
    factors = { }
    divisors = [ ]
    
    for f in factorize(n):
        try:
            factors[f] = factors[f] + 1
        except KeyError:
            factors[f] = 1

    f = tuple(factors.keys())
    c = tuple(factors.values())

    for p in _product(*[list(range(t + 1)) for t in c]):
        d = int(_reduce(_mul, ([f[i] ** p[i] for i in range(len(f))])))
        divisors.append(d)

    if with_negative:
        divisors.extend([-d for d in divisors])
        
    return sorted(divisors)
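A usage sketch, hedged on the assumption that factorize(n) yields the prime factors of n with multiplicity:

# get_divisors(12) -> [1, 2, 3, 4, 6, 12]
# get_divisors(4, with_negative=True) -> [-4, -2, -1, 1, 2, 4]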
Example #22
def toposort(data):
    """
    Topological sort implementation via:
    https://bitbucket.org/ericvsmith/toposort/src/e63ddf93ccb68a7e33ba97680ecdb72ea9f96669/toposort.py
    """
    from functools import reduce as _reduce

    # Special case empty input.
    if len(data) == 0:
        return

    # Copy the input so as to leave it unmodified.
    data = data.copy()

    # Ignore self dependencies.
    for k, v in data.items():
        v.discard(k)
    # Find all items that don't depend on anything.
    extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())
    # Add empty dependences where needed.
    data.update({item: set() for item in extra_items_in_deps})
    while True:
        ordered = set(item for item, dep in data.items() if len(dep) == 0)
        if not ordered:
            break
        yield ordered
        data = {
            item: (dep - ordered)
            for item, dep in data.items() if item not in ordered
        }
    if len(data) != 0:
        raise ValueError('Cyclic dependencies exist among these items: {}'.format(
            ', '.join(repr(x) for x in data.items())))
Example #23
    def h5_file_size(h5_obj_or_filepath):
        """
        Get file size of h5 file contents.

        :param h5_obj_or_filepath: Filepath where the container object is saved to disk, or h5 object.
        :type h5_obj_or_filepath: str or h5 obj
        :return: Size of h5 file contents, and batch size.
        """
        if type(h5_obj_or_filepath) is str:
            h5_obj = _h5py.File(h5_obj_or_filepath, 'r')
        else:
            h5_obj = h5_obj_or_filepath

        size = 0
        batch_size = 0
        for key, value in sorted(h5_obj.items()):
            if isinstance(value, _h5py.Group):
                size_to_add, batch_size = Container.h5_file_size(value)
                size += size_to_add
            elif isinstance(value, _h5py.Dataset):
                value_shape = value.shape
                size += _reduce(_mul, value_shape, 1) * value.dtype.itemsize
                batch_size = value_shape[0]
            else:
                raise Exception('Item found inside h5_obj which was neither a Group nor a Dataset.')
        return size, batch_size
Example #24
def _toposort(data):
    """Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependences, each subsequent set consists of items that depend upon
items in the preceding sets.
"""

    # Special case empty input.
    if len(data) == 0:
        return

    # Ignore self dependencies.
    for k, v in data.items():
        v.discard(k)
    # Find all items that don't depend on anything.
    extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())
    # Add empty dependences where needed.
    data.update({item: set() for item in extra_items_in_deps})
    while True:

        ordered = sorted(set(item for item, dep in data.items() if len(dep) == 0))
        if not ordered:
            break

        for item in ordered:
            yield item
            data.pop(item, None)

        for dep in sorted(data.values()):
            dep -= set(ordered)

    if len(data) != 0:
        msg = 'Cyclic dependencies exist among these items: {}'
        raise CondaValueError(msg.format(' -> '.join(repr(x) for x in data.keys())))
Example #25
def bilinear_resample(x,
                      warp,
                      batch_shape=None,
                      input_image_dims=None,
                      _=None):
    if batch_shape is None:
        batch_shape = _ivy.shape(x)[:-3]
    if input_image_dims is None:
        input_image_dims = _ivy.shape(x)[-3:-1]
    batch_shape = list(batch_shape)
    input_image_dims = list(input_image_dims)
    batch_shape_product = _reduce(_mul, batch_shape, 1)
    warp_flat = _mx.nd.reshape(warp,
                               [batch_shape_product] + input_image_dims + [2])
    warp_flat_x = 2 * warp_flat[..., 0:1] / (input_image_dims[1] - 1) - 1
    warp_flat_y = 2 * warp_flat[..., 1:2] / (input_image_dims[0] - 1) - 1
    warp_flat_scaled = _mx.nd.concat(warp_flat_x, warp_flat_y, dim=-1)
    warp_flat_trans = _mx.nd.transpose(warp_flat_scaled, (0, 3, 1, 2))
    mat_flat = _mx.nd.reshape(x,
                              [batch_shape_product] + input_image_dims + [-1])
    mat_flat_trans = _mx.nd.transpose(mat_flat, (0, 3, 1, 2))
    interpolated_flat_transposed = _mx.nd.BilinearSampler(
        mat_flat_trans, warp_flat_trans)
    interpolated_flat = _mx.nd.transpose(interpolated_flat_transposed,
                                         (0, 2, 3, 1))
    return _mx.nd.reshape(interpolated_flat,
                          batch_shape + input_image_dims + [-1])
Example #26
def get_dependency_graph(component):
    """
    Generate a component's graph of dependencies, which can be passed to
    :func:`run` or :func:`run_incremental`.
    """
    if component not in DEPENDENCIES:
        raise Exception("%s is not a registered component." % get_name(component))

    if not DEPENDENCIES[component]:
        return {component: set()}

    graph = defaultdict(set)

    def visitor(c, parent):
        if parent is not None:
            graph[parent].add(c)

    walk_dependencies(component, visitor)

    graph = dict(graph)

    # Find all items that don't depend on anything.
    extra_items_in_deps = _reduce(set.union, graph.values(), set()) - set(graph.keys())

    # Add empty dependencies where needed.
    graph.update(dict((item, set()) for item in extra_items_in_deps))

    return graph
Example #27
    def _toposort(self, data):
        # Copied from https://bitbucket.org/ericvsmith/toposort/src/25b5894c4229cb888f77cf0c077c05e2464446ac/toposort.py?at=default
        # -> Apache 2.0 license, Copyright 2014 True Blade Systems, Inc.
        
        # Special case empty input.
        if len(data) == 0:
            return

        # Copy the input so as to leave it unmodified.
        data = data.copy()

        # Ignore self dependencies.
        for k, v in data.items():
            v.discard(k)
        # Find all items that don't depend on anything.
        extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())
        # Add empty dependences where needed.
        data.update({item:set() for item in extra_items_in_deps})
        while True:
            ordered = set(item for item, dep in data.items() if len(dep) == 0)
            if not ordered:
                break
            yield ordered
            data = {item: (dep - ordered)
                    for item, dep in data.items()
                        if item not in ordered}
        if len(data) != 0:
            raise ValueError('Cyclic dependencies exist among these items: {}'.format(', '.join(repr(x) for x in data.items())))
Example #28
def toposort(data):
    """Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependencies, each subsequent set consists of items that depend upon
items in the preceding sets.
"""

    # Special case empty input.
    if len(data) == 0:
        return

    # Copy the input so as to leave it unmodified.
    data = data.copy()

    # Ignore self dependencies.
    for k, v in data.items():
        v.discard(k)
    # Find all items that don't depend on anything.
    extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())
    # Add empty dependencies where needed.
    data.update({item: set() for item in extra_items_in_deps})
    while True:
        ordered = set(item for item, dep in data.items() if len(dep) == 0)
        if not ordered:
            break
        yield ordered
        data = {
            item: (dep - ordered)
            for item, dep in data.items() if item not in ordered
        }
    if len(data) != 0:
        raise ValueError(
            'Cyclic dependencies exist among these items: {}'.format(', '.join(
                repr(x) for x in data.items())))
Example #29
def toposort(data):

    # Special case empty input.
    if len(data) == 0:
        return

    # Copy the input so as to leave it unmodified.
    data = data.copy()

    # Ignore self dependencies.
    for k, v in data.items():
        v.discard(k)
    # Find all items that don't depend on anything.
    extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())
    # Add empty dependences where needed.
    data.update({item: set() for item in extra_items_in_deps})
    while True:
        ordered = set(item for item, dep in data.items() if len(dep) == 0)
        if not ordered:
            break
        yield ordered
        data = {
            item: (dep - ordered)
            for item, dep in data.items() if item not in ordered
        }
    if len(data) != 0:
        exception_string = (
            'Circular dependencies exist among these items: {{{}}}'.format(
                ', '.join('{!r}:{!r}'.format(key, value)
                          for key, value in sorted(data.items()))))
        raise Exception(exception_string)
Example #30
def lcm(*integers):
    '''Return the least common multiple of the given integers'''
    try:
        # math.lcm is available on Python >= 3.9; return its result directly.
        return _math.lcm(*integers)
    except Exception:
        return _reduce(_lcm1, integers)
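A short usage sketch, hedged on the assumption that _lcm1 is a two-argument lcm helper used as the fallback:

assert lcm(4, 6) == 12
assert lcm(3, 5, 7) == 105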
Example #31
def bilinear_resample(x,
                      warp,
                      batch_shape=None,
                      input_image_dims=None,
                      _=None):
    if batch_shape is None:
        if not _tf.executing_eagerly():
            raise Exception(
                'batch_shape must be provided when in tensorflow graph mode')
        batch_shape = _ivy.shape(x)[:-3]
    if input_image_dims is None:
        if not _tf.executing_eagerly():
            raise Exception(
                'input_image_dims must be provided when in tensorflow graph mode'
            )
        input_image_dims = _ivy.shape(x)[-3:-1]
    batch_shape = list(batch_shape)
    input_image_dims = list(input_image_dims)
    batch_shape_product = _reduce(_mul, batch_shape, 1)
    warp_flat = _tf.reshape(warp, [batch_shape_product] + [-1, 2])
    mat_flat = _tf.reshape(x, [batch_shape_product] + input_image_dims + [-1])
    global _tfa
    if _tfa is None:
        try:
            import tensorflow_addons as _tfa
        except ImportError:
            raise Exception(
                'Unable to import tensorflow_addons, verify this is correctly installed.'
            )
    interpolated_flat = _tfa.image.interpolate_bilinear(mat_flat,
                                                        warp_flat,
                                                        indexing='xy')
    return _tf.reshape(interpolated_flat,
                       batch_shape + input_image_dims + [-1])
Example #32
    def _toposort(self, data):
        # Copied from https://bitbucket.org/ericvsmith/toposort/src/25b5894c4229cb888f77cf0c077c05e2464446ac/toposort.py?at=default
        # -> Apache 2.0 license, Copyright 2014 True Blade Systems, Inc.

        # Special case empty input.
        if len(data) == 0:
            return

        # Copy the input so as to leave it unmodified.
        data = data.copy()

        # Ignore self dependencies.
        for k, v in data.items():
            v.discard(k)
        # Find all items that don't depend on anything.
        extra_items_in_deps = _reduce(set.union, data.values()) - set(
            data.keys())
        # Add empty dependences where needed.
        data.update({item: set() for item in extra_items_in_deps})
        while True:
            ordered = set(item for item, dep in data.items() if len(dep) == 0)
            if not ordered:
                break
            yield ordered
            data = {
                item: (dep - ordered)
                for item, dep in data.items() if item not in ordered
            }
        if len(data) != 0:
            raise ValueError(
                'Cyclic dependencies exist among these items: {}'.format(
                    ', '.join(repr(x) for x in data.items())))
Example #33
def gather_nd(params, indices, dev_str: Optional[str] = None):
    if dev_str is None:
        dev_str = _callable_dev_str(params)
    indices_shape = indices.shape
    params_shape = params.shape
    num_index_dims = indices_shape[-1]
    result_dim_sizes_list = [
        _reduce(mul, params_shape[i + 1:], 1)
        for i in range(len(params_shape) - 1)
    ] + [1]
    result_dim_sizes = torch.tensor(result_dim_sizes_list).to(
        str_to_dev(dev_str))
    implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
    flat_params = torch.reshape(params, (-1, ))
    new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
    indices_scales = torch.reshape(result_dim_sizes[0:num_index_dims],
                                   new_shape)
    indices_for_flat_tiled = torch.reshape(
        torch.sum(indices * indices_scales, -1, keepdim=True),
        (-1, 1)).repeat(*[1, implicit_indices_factor])
    implicit_indices = torch.unsqueeze(
        torch.arange(implicit_indices_factor).to(str_to_dev(dev_str)),
        0).repeat(*[indices_for_flat_tiled.shape[0], 1])
    indices_for_flat = indices_for_flat_tiled + implicit_indices
    flat_indices_for_flat = torch.reshape(indices_for_flat,
                                          (-1, )).type(torch.long)
    flat_gather = torch.gather(flat_params, 0, flat_indices_for_flat)
    res = torch.reshape(
        flat_gather,
        list(indices_shape[:-1]) + list(params_shape[num_index_dims:]))
    return res
Example #34
def _prod(l):
    """
    Computes the product of the list entries.

    :param l: The list to compute the product over.
    :return: The product of the list entries.
    """
    return _reduce(lambda x, y: x * y, l, 1)
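A short usage sketch:

assert _prod([2, 3, 4]) == 24
assert _prod([]) == 1  # the empty product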
Example #35
    def __len__(self, context=None):

        if context is not None:
            return 0

        len_ = 0

        for subject_table_iri, subject_class in self._orm_classes.items():
            subject_cols_props = self._orm_columns_properties[subject_table_iri]
            subject_rels = self._orm_relationships[subject_table_iri]

            # sum:
            #   * 1 for each class statement (1 for each row)
            #   * 1 for each literal property statement (1 for each non-null
            #     attribute of each row)
            #   * 1 for each reference property statement (1 for each totally
            #     non-null foreign key value tuple of each row)
            len_ += \
                self._orm.query\
                 (_sqla.func.count(subject_class.__mapper__.primary_key[0])
                   + _reduce
                      (_add,
                       (_sqlaf.sum
                         (_sqla.case(((prop.class_attribute == None,
                                       _sqla.literal(0)),),
                                     else_=_sqla.literal(1)))
                        for prop in subject_cols_props.values()),
                       _sqla.literal(0))
                   + _reduce
                      (_add,
                       (_sqlaf.sum
                         (_sqla.case
                           (((_reduce
                               (_sqla.and_,
                                (subject_cols_props[colname].class_attribute
                                  != None
                                 for colname in colnames),
                                _sqla.literal(True)),
                              _sqla.literal(1)),
                             ),
                            else_=_sqla.literal(0)))
                        for colnames in subject_rels.keys()),
                       _sqla.literal(0)))\
                 .scalar()

        return len_
Example #36
def _prod(l):
    """
    Computes the product of the list entries.

    :param l: The list to compute the product over.
    :return: The product of the list entries.
    """
    return _reduce(lambda x, y: x * y, l, 1)
Example #37
def p011(grid):
	max = 0
	for row in range(len(grid)):
		for cell in range(len(grid[0])):
			if cell <= len(grid[0]) - 4:
				product = _reduce(_mul, grid[row][cell:cell + 4])
				if product > max: max = product
			if row <= len(grid) - 4:
				product = _reduce(_mul, (grid[row + i][cell] for i in range(4)))
				if product > max: max = product
			if cell <= len(grid[0]) - 4 and row <= len(grid) - 4:
				product = _reduce(_mul, (grid[row + i][cell + i] for i in range(4)))
				if product > max: max = product
			# the anti-diagonal needs cell - 3 >= 0
			if cell >= 3 and row <= len(grid) - 4:
				product = _reduce(_mul, (grid[row + i][cell - i] for i in range(4)))
				if product > max: max = product
	return max
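A quick sanity sketch: in a uniform 4x4 grid of 2s, every line of four multiplies to 16.

assert p011([[2] * 4 for _ in range(4)]) == 16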
Example #38
def histogramdd(
    a, bins=10, range=None, normed=None, weights=None, density=None, **kwargs
):
    np = _np  # Hidden to keep module clean

    with _KWArgs(kwargs) as k:
        bh_cls = k.optional("histogram", None)
        threads = k.optional("threads", None)
        cls = _hist.Histogram if bh_cls is None else bh_cls
        bh_storage = k.optional("storage", _storage.Double())

    if normed is not None:
        raise KeyError(
            "normed=True is not recommended for use in Numpy, and is not supported in boost-histogram; use density=True instead"
        )
    if density and bh_cls is not None:
        raise KeyError(
            "boost-histogram does not support the density keyword when returning a boost-histogram object"
        )

    # Odd numpy design here. Oh well.
    if isinstance(a, np.ndarray):
        a = a.T

    rank = len(a)

    # Integer bins: all the same
    try:
        bins = [int(bins)] * rank
    except TypeError:
        pass

    # Single None -> list of Nones
    if range is None:
        range = [None] * rank

    axs = []
    for n, (b, r) in enumerate(zip(bins, range)):
        if np.issubdtype(type(b), np.integer):
            if r is None:
                # Nextafter may affect bin edges slightly
                r = (np.min(a[n]), np.max(a[n]))
            cpp_ax = _core.axis.regular_numpy(b, r[0], r[1])
            new_ax = _cast(None, cpp_ax, _axis.Axis)
            axs.append(new_ax)
        else:
            b = np.asarray(b, dtype=np.double)
            b[-1] = np.nextafter(b[-1], np.finfo("d").max)
            axs.append(_axis.Variable(b))

    hist = cls(*axs, storage=bh_storage).fill(*a, weight=weights, threads=threads)

    if density:
        areas = _reduce(_mul, hist.axes.widths)
        density = hist.view() / hist.sum() / areas
        return (density, hist.to_numpy()[1:])

    return hist if bh_cls is not None else hist.to_numpy(dd=True)
Example #39
def is_prime3(num):
    '''Tests if a given number is prime. Written with reduce.'''
    if num == 2:
        return True
    elif num % 2 == 0 or num <= 1:
        return False
    root = _ceil(_sqrt(num))
    return _reduce(lambda acc, d: False if not acc or num % d == 0 else True,
                   range(3, root+1, 2), True)
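A short usage sketch:

assert [n for n in range(2, 20) if is_prime3(n)] == [2, 3, 5, 7, 11, 13, 17, 19]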
Example #40
File: qfn.py Project: a-poor/QGates
def matmul(a: np.ndarray, *b: np.ndarray) -> np.ndarray:
    """
    Performs matrix multiplication between
    two or more matrices.

    :param a: Start value for the matrix multiplication
    :param b: One or more further matrices for the product
    """
    return _reduce(_matmul,b,a)
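A short usage sketch, hedged on the assumption that _matmul is operator.matmul:

X = np.array([[0., 1.], [1., 0.]])
assert np.array_equal(matmul(X, X), np.eye(2))  # a permutation matrix squares to the identity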
Example #41
    def join(self, cluster_list):
        """
        Returns the smallest cluster that is a supercluster of all given clusters.
        """
        C_groups = self.cluster_groups()
        items = list(_reduce(lambda x, y: x.union(y, sort=False),
                             [C_groups[c] for c in cluster_list],
                             _pd.Index(_np.array([]))).to_numpy())
        rivals = self.clusters_containing(items)
        lens = _np.array([len(self.cluster_groups()[c]) for c in rivals])
        return rivals[_np.argmin(lens)]
Example #42
File: ocp.py Project: fosskers/ocp
def align_by_key(key, block):
    '''Given a block of lines that all contain a key, aligns them all
    neatly according to the position of the key.
    TODO: Holy crap make this prettier.
    '''
    line_tokens = [line.split(key, 1) for pos, line in block]
    firsts      = list(map(lambda tokens: tokens[0].rstrip(), line_tokens))
    # Is there a way to do a Haskell-like 'let' in a Python lambda?
    longest = _reduce(lambda ac, i: len(i) if len(i) > ac else ac, firsts, 0)
    for pos, line in enumerate(line_tokens):  # Perform alignment.
        start            = firsts[pos] + (' ' * (longest - len(firsts[pos])))
        line_tokens[pos] = ''.join((start, key, line[1].lstrip()))
    for pos in range(len(block)):  # Repair 'block'.
        block[pos] = (block[pos][0], line_tokens[pos])
    return block
Example #43
def _prod(l):
    return _reduce(lambda x,y: x*y, l, 1)
Example #44
from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl

try:
    _reduce = reduce
except NameError:
    from functools import reduce as _reduce
prod = lambda sequence,start=1: _reduce(lambda x, y: x*y, sequence, start)

def maxvalue(a):
    try:
        typecode = a.typecode
    except AttributeError:
        typecode = a.dtype.char
    if typecode == ('f'):
        return 1e30
    elif typecode == ('d'):
        return 1e300
    else:
        return 2 ** (a.itemsize * 7) - 1


class BaseTestCCOBuf(object):

    COMM = MPI.COMM_NULL

    def testBarrier(self):
        self.COMM.Barrier()

    def testBcast(self):
Example #45
def p008(digits):
	return max(_reduce(_mul, map(int, digits[i:i + 5])) for i in range(len(str(digits)) - 4))
Example #46
from mpi4py import MPI
import mpiunittest as unittest

try:
    _reduce = reduce
except NameError:
    from functools import reduce as _reduce
cumsum  = lambda seq: _reduce(lambda x, y: x+y, seq, 0)
cumprod = lambda seq: _reduce(lambda x, y: x*y, seq, 1)

_basic = [None,
          True, False,
          -7, 0, 7, 2**31,
          -2**63, 2**63-1,
          -2.17, 0.0, 3.14,
          1+2j, 2-3j,
          'mpi4py',
          ]
messages = _basic
messages += [ list(_basic),
              tuple(_basic),
              dict([('k%d' % key, val)
                    for key, val in enumerate(_basic)])
              ]

class BaseTestCCOObjInter(object):

    BASECOMM  = MPI.COMM_NULL
    INTRACOMM = MPI.COMM_NULL
    INTERCOMM = MPI.COMM_NULL
Example #47
    def __hash__(self):
        if self._hash is None:
            self._hash = _reduce(_xor,
                                 (hash(pair) for pair in self.iterallitems()))
        return self._hash
Example #48
def gmn(*numbers):
    if any(x < 0 for x in numbers):
        raise MathError('<font color=red>gmn( )</font> ' +\
                        translate('MathErrors', _errors['nv']))
    else:
        return rt(_reduce(lambda x, y: x * y, numbers), len(numbers))
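A short usage sketch, hedged on the assumption that rt(x, n) returns the n-th root of x:

# gmn(2, 8) -> rt(16, 2) -> 4, the geometric mean of 2 and 8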
Example #49
def factorial(num, limit=1):
    """Can impose a limit to stop the multiplication part-way through.
    This represents x!/y! (factorial division) when y is less than x.
    """
    return _reduce(lambda acc, n: acc * n, range(limit, num + 1), 1)
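A short usage sketch:

assert factorial(5) == 120     # 5!
assert factorial(5, 3) == 60   # 5 * 4 * 3, i.e. 5!/2!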
Example #50
    def __hash__(self):
        if self._hash is None:
            self._hash = _reduce(_xor,
                                 (hash(pair) for pair in self.iteritems()),
                                 hash(self._dicttype()))
        return self._hash
Example #51
def ltoi(digits):
    """Written with reduce. Written 06/04/2011"""
    return _reduce(lambda acc, n: acc * 10 + n, digits)
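A short usage sketch:

assert ltoi([1, 2, 3]) == 123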
Example #52
def _convex_hull_3d(vecs, eps=1e-6):
    """三次元の凸包を求める
    :param vecs: list of 3D array
    :type vecs: list | tuple | numpy.ndarray
    :param eps: 距離がこれ以下なら同一平面と見做す
    """

    n = len(vecs)
    if n == 0:
        return []
    elif n == 1:
        return [0]

    verts = [_Vert(i, v) for i, v in enumerate(vecs)]

    # Find two vertices that are as far apart as possible
    # medium = _reduce(lambda a, b: a + b, vecs) / len(vecs)
    medium = _np.sum(vecs, axis=0) / len(vecs)

    # v1 = max(verts, key=lambda v: _norm(v.co - medium))
    # v2 = max(verts, key=lambda v: _norm(v.co - v1.co))
    v1 = verts[_norm(vecs - medium, axis=1).argmax()]
    v2 = verts[_norm(vecs - v1.co, axis=1).argmax()]
    line = v2.co - v1.co
    if _norm(line) <= eps:  # all vertices coincide
        return [0]
    if len(verts) == 2:
        return [v1.index, v2.index]

    # Find a vertex with which to form a triangle
    # v3 = max(verts, key=lambda v: _norm(_cross(line, v.co - v1.co)))
    v3 = verts[_norm(_cross(line, vecs - v1.co), axis=1).argmax()]
    # NOTE:
    # np.cross(vec, mat)[0] == np.cross(vec, mat[0])
    # np.cross(mat, vec)[0] == np.cross(mat[0], vec)

    if _norm(_cross_3d(_normalized(line), v3.co - v1.co)) <= eps:
        # all vertices are collinear
        return [v1.index, v2.index]
    if len(verts) == 3:
        return [v1.index, v2.index, v3.index]

    verts.remove(v1)
    verts.remove(v2)
    verts.remove(v3)

    pool = _mp.Pool()

    # Find a vertex with which to form a tetrahedron
    normal = _normal_tri(v1.co, v2.co, v3.co)
    plane = _plane(v1.co, normal)
    def key_func(v):
        return abs(_distance_point_to_plane(v.co4d, plane))
    v4 = max(verts, key=key_func)
    if key_func(v4) <= eps:
        # all vertices lie in a single plane
        quat = _rotation_difference_v3v3(normal, _array([0., 0., 1.]))
        # vecs_2d = [_np.resize(_mul_qt_v3(quat, v), 2) for v in vecs]
        # vecs_2d = [_mul_qt_v3(quat, v)[:2] for v in vecs]
        result = pool.starmap_async(_mul_qt_v3, zip(_repeat(quat), vecs))
        vecs_2d = [v[:2] for v in result.get()]
        return _convex_hull_2d(vecs_2d, eps)
    verts.remove(v4)

    # Build the tetrahedron
    #       ^ normal
    #    v3 |
    #     / |\
    # v1 /____\ v2
    #    \    /
    #     \  /
    #     v4
    if _distance_point_to_plane(v4.co, v1.co, normal) < 0.0:
        faces = [_Face(v1, v2, v3),
                 _Face(v1, v4, v2), _Face(v2, v4, v3), _Face(v3, v4, v1)]
    else:
        faces = [_Face(v1, v3, v2),
                 _Face(v1, v2, v4), _Face(v2, v3, v4), _Face(v3, v1, v4)]

    # Distribute the remaining vertices among the faces
    _divide_outer_verts(faces, verts, eps)

    # Build edge_faces
    edge_faces = _defaultdict(list)
    for face in faces:
        for ekey in face.edge_keys:
            edge_faces[ekey].append(face)

    while True:
        added = False
        for i in range(len(faces)):
            try:
                face = faces[i]
            except IndexError:
                break
            if not face.outer_verts:
                continue

            v1 = max(face.outer_verts, key=lambda v: face.distance4d(v.co4d))

            if face.distance4d(v1.co4d) > eps:
                # Add faces radiating from v1 so that the hull stays convex
                added = True

                # Find the faces that become hidden and therefore unnecessary
                remove_faces = set()
                _find_remove_faces_re(remove_faces, v1.co4d, face, edge_faces,
                                      eps)

                # Detach remove_faces from the polyhedron, opening a hole
                for f in remove_faces:
                    for ekey in f.edge_keys:
                        edge_faces[ekey].remove(f)
                    faces.remove(f)

                # Cover the hole with new faces
                new_faces = []
                ekey_count = _defaultdict(int)
                for f in remove_faces:
                    for ekey in f.edge_keys:
                        ekey_count[ekey] += 1
                for ekey, cnt in ekey_count.items():
                    if cnt != 1:
                        continue
                    linkface = edge_faces[ekey][0]
                    v2, v3 = ekey
                    if linkface.verts[linkface.verts.index(v2) - 1] != v3:
                        v2, v3 = v3, v2
                    new_face = _Face(v1, v2, v3)
                    for key in new_face.edge_keys:
                        edge_faces[key].append(new_face)
                    new_faces.append(new_face)
                faces.extend(new_faces)

                # Redistribute the vertices
                outer_verts = _reduce(lambda a, b: a + b,
                                      (f.outer_verts for f in remove_faces))
                if v1 in outer_verts:
                    outer_verts.remove(v1)
                _divide_outer_verts(new_faces, outer_verts, eps)

            else:
                face.outer_verts = []

        if not added:
            break

    # Do not forget to release the worker pool
    pool.close()
    pool.join()

    return [[v.index for v in f.verts] for f in faces]
Example #53
def phi(n):
    '''Euler's totient function. Yields the number of coprimes of n.'''
    if n == 1:
        return 1
    factors = prime_factors(n)
    return int(n * _reduce(lambda acc, f: acc * (1 - (1 / f)), factors, 1))
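A short usage sketch, hedged on the assumption that prime_factors(n) yields the distinct prime factors of n:

assert phi(9) == 6   # the coprimes of 9 below it are 1, 2, 4, 5, 7, 8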
Example #54
    def __hash__(self):
        if self._hash is None:
            self._hash = _reduce(_xor, (hash(item) for item in self))
        return self._hash