Example #1
    def distribution(self):
        """
        distribution calculates the number of microstates for each energy level.
        """

        # generate possible combinations out of the states
        list_of_states_at_row = [self.states] * self.height
        rows = list(_product(*list_of_states_at_row))

        list_of_rows = [rows] * self.width

        all_states = list(_product(*list_of_rows))

        all_states_count = len(all_states)

        distribution = []
        for state_i in all_states:
            self.state = state_i
            ene = self._observe__energy(state_i, dist=True)
            single_particle_state_ct = self._single_particle_state_count(
                state_i)
            distribution.append({
                'energy': ene.get('energy'),
                'state': state_i,
                'dist': ene.get('dist'),
                'spin_dist': single_particle_state_ct
            })

        self.dist = {'total_states': all_states_count, 'states': distribution}
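
A minimal sketch of the enumeration above, assuming `self.states` holds the allowed single-site values: `_product` is applied once per row and once across rows, so a lattice of width w and height h with s single-site states yields s**(h*w) microstates.

from itertools import product as _product

states, height, width = (-1, 1), 2, 2           # hypothetical 2x2 spin lattice
rows = list(_product(*([states] * height)))     # 4 possible columns of spins
all_states = list(_product(*([rows] * width)))  # 16 microstates in total
assert len(all_states) == len(states) ** (height * width)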
Example #2
    def _lex(self, image):
        """
        Runs the actual lexing algorithm.
        """
        parent = self._parent
        union = self._union
        slide = self._slide
        _kind = self._kind
        X, Y = image.size
        cs = self._opinions['codel_size']
        getcolor = image.getpixel
        corners = {}
        rank = {}
        # first we walk over the pixels -- when we encounter whitespace, we
        # compute the sliding extents of the whitespace.  processing pixels
        # via product(range(X), range(Y)), the pixel addresses are explored
        # in lexicographic order -- making iterative constructions downward
        # and rightward simple, and upward or leftward more challenging --
        # see the production rules to the right, and self.slide for details

        # also we merge programming-colored pixels into their blocks using
        # a more-or-less standard disjoint-set data structure. it's fairly
        # well-known that rank can be used to compute set sizes (valid only
        # at roots) -- we use the same trick to compute the "corners"
        for p in _product(range(0, X, cs), range(0, Y, cs)):
            x, y = p
            kind, color = _kind(getcolor(p))
            for q, z, Z, d0, d1 in [((x+cs, y), x, X-cs, 2, 0),  #peek at pixel to right
                                    ((x, y+cs), y, Y-cs, 3, 1)]: #peek at pixel below (left,right -> up,down)
                if z >= Z:                               #rules: W=white, X=nonwhite, C=specific color
                    if kind == 'slide':                  #------------------------------------------------------
                        slide[slide[p, d0], d1] = p      #W|: right(left(x, y)) := (x, y)
                    continue
                elif z == 0 and kind == 'slide':
                    slide[(p, d0)] = p                   #|W: left(x, y) := (x, y)
                kind1, color1 = _kind(getcolor(q))
                if kind == 'slide':
                    if kind1 == 'slide':
                        slide[q, d0] = slide[(p, d0)]    #WW: left(x+1, y) := left(x, y) 
                    else:
                        slide[slide[(p, d0)], d1] = p    #WX: right(left(x, y)) := (x, y)
                elif kind1 == 'slide':
                    slide[q, d0] = q                     #XW: left(x, y) := (x, y) 
                elif kind == 'code' and color1 == color:
                    union(p, q, rank, corners)           #CC: merge programming pixels of blocks
            if kind == 'code' and p not in parent:
                parent[p] = p
                rank[p] = 1
                corners[p] = {(d, c): p for d in (0, 1, 2, 3) for c in (0, 1)}

        lexemes = self._lexeme
        for p in _product(range(0, X, cs), range(0, Y, cs)):
            if p == parent.get(p):
                lexemes[p] = _Lexeme(p, corners[p], rank[p], getcolor(p))
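
A minimal sketch of the rank-as-size trick the comments mention, assuming a standard disjoint-set layout (the lexer's `union` also threads a `corners` dict through the same merge): sizes stored in `rank` stay valid only at roots, which is how the block sizes are obtained.

def find(parent, p):
    # walk to the root with path halving
    while parent[p] != p:
        parent[p] = parent[parent[p]]
        p = parent[p]
    return p

def union(parent, rank, p, q):
    rp, rq = find(parent, p), find(parent, q)
    if rp == rq:
        return
    if rank[rp] < rank[rq]:   # attach the smaller tree under the larger
        rp, rq = rq, rp
    parent[rq] = rp
    rank[rp] += rank[rq]      # set size, valid only at the root rp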
Example #3
def get_divisors(n, with_negative=False):
    """Return all the divisors of a number.

    Including the number itself.
    """
    factors = {}
    divisors = []

    for f in factorize(n):
        try:
            factors[f] = factors[f] + 1
        except KeyError:
            factors[f] = 1

    f = tuple(factors.keys())
    c = tuple(factors.values())

    for p in _product(*[list(range(t + 1)) for t in c]):
        d = int(_reduce(_mul, ([f[i] ** p[i] for i in range(len(f))])))
        divisors.append(d)

    if with_negative:
        divisors.extend([-d for d in divisors])
        
    return sorted(divisors)
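
A usage sketch; `factorize` is not part of the example above, so this trial-division version is only an assumption about its contract (yielding prime factors with multiplicity).

def factorize(n):
    # assumed helper: yield prime factors of n with multiplicity
    d = 2
    while d * d <= n:
        while n % d == 0:
            yield d
            n //= d
        d += 1
    if n > 1:
        yield n

# get_divisors(12)                    -> [1, 2, 3, 4, 6, 12]
# get_divisors(6, with_negative=True) -> [-6, -3, -2, -1, 1, 2, 3, 6]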
Example #4
def _assignment(x, values, indices, mode, axis):
    if _is_boolean(indices):
        if ndim(array(indices)) > 1:
            indices_tensor = _tf.where(indices)
            indices = [tuple(ind) for ind in indices_tensor]
        else:
            indices_from_booleans = [
                index for index, val in enumerate(indices) if val
            ]
            indices_along_dims = [range(dim) for dim in shape(x)]
            indices_along_dims[axis] = indices_from_booleans
            indices = list(_product(*indices_along_dims))
    if _tf.rank(values) == 0:
        return _assignment_single_value(x, values, indices, mode, axis)
    values = cast(flatten(array(values)), x.dtype)

    single_index = not isinstance(indices, list)
    if _tf.is_tensor(indices):
        single_index = ndim(indices) <= 1 and sum(indices.shape) <= ndim(x)
    if single_index:
        if len(values) > 1:
            indices = [
                tuple(list(indices[:axis]) + [i] + list(indices[axis:]))
                for i in range(x.shape[axis])
            ]
        else:
            indices = [indices]

    if len(values) != len(indices):
        raise ValueError("Either one value or as many values as indices")

    for i_index, index in enumerate(indices):
        x = _assignment_single_value(x, values[i_index], index, mode, axis)
    return x
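
A minimal sketch of the boolean-mask branch above: a 1-D mask along `axis` becomes explicit index tuples by substituting the selected positions into per-axis ranges and expanding with `_product`.

from itertools import product as _product

shape, axis = (2, 3), 1                 # hypothetical array shape and axis
mask = [True, False, True]
indices_along_dims = [range(dim) for dim in shape]
indices_along_dims[axis] = [i for i, val in enumerate(mask) if val]
indices = list(_product(*indices_along_dims))
# -> [(0, 0), (0, 2), (1, 0), (1, 2)]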
Example #5
 def _expand_bond_stereo(sgr):
     bnd_ste_keys = stereogenic_bond_keys(sgr)
     nste_bnds = len(bnd_ste_keys)
     sgrs = [
         _set_bond_stereo_parities(
             sgr, dict(zip(bnd_ste_keys, bnd_ste_par_vals)))
         for bnd_ste_par_vals in _product(bool_vals, repeat=nste_bnds)
     ]
     return sgrs
Example #6
 def _expand_atom_stereo(sgr):
     atm_ste_keys = stereogenic_atom_keys(sgr)
     nste_atms = len(atm_ste_keys)
     sgrs = [
         _set_atom_stereo_parities(
             sgr, dict(zip(atm_ste_keys, atm_ste_par_vals)))
         for atm_ste_par_vals in _product(bool_vals, repeat=nste_atms)
     ]
     return sgrs
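
A minimal sketch of the pattern shared by the two expanders above, assuming `bool_vals` is the pair of parities: `_product(bool_vals, repeat=n)` walks all 2**n parity assignments for the stereogenic keys.

from itertools import product as _product

bool_vals = (False, True)
keys = ['k1', 'k2']   # hypothetical stereogenic keys
assignments = [dict(zip(keys, vals))
               for vals in _product(bool_vals, repeat=len(keys))]
# 2**2 == 4 assignments, from all-False to all-True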
Example #7
    def _accumulate(self, seen, counts):
        "Given a 'seen' dict, make a dict of counts at each threshold level"
        nsums = (len(self.recalls) * len(self.precisions))
        sums = [0] * nsums
        for v in seen.values():
            for i in range(nsums):
                sums[i] += (v >> i) & 1 == 1

        for i, (recall, precision) in enumerate(_product(self.recalls, self.precisions)):
            counts[(recall, precision)] = sums[i]
Example #8
    def forall(self, t="product"):
        if t.startswith("pr"):
            _k = [list(i) for i in _product(self, repeat=2)]
        elif t.startswith("pe"):
            _k = [list(i) for i in _permutations(self, 2)]
        elif t.startswith("c") and t.endswith("s"):
            _k = [list(i) for i in _combinations(self, 2)]
        else:
            _k = [list(i) for i in _combinations_with_replacement(self, 2)]
        return lzlist(_k)
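
A quick comparison of the four modes above on a three-element list, showing how many pairs each `t` prefix selects.

from itertools import (product, permutations, combinations,
                       combinations_with_replacement)

xs = [1, 2, 3]
len(list(product(xs, repeat=2)))                 # 9  ("pr...")
len(list(permutations(xs, 2)))                   # 6  ("pe...")
len(list(combinations(xs, 2)))                   # 3  ("c...s")
len(list(combinations_with_replacement(xs, 2)))  # 6  (fallback)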
Example #9
    def intersection_product(self, other):

        if not isinstance(other, usetset_abc):
            other = self.__class__(other)

        if self and other:
            return self.__class__(
                {self._item_class()(item & other_item) for item, other_item in _product(self._items, other._items)}
            )
        else:
            return self.__class__()
Example #10
 def __enter__(self):
     try:
         self._block.add_items(*self._items)
         self._block.add_topics(*self._topics)           
         self._buffers = {k:[] for k in _product(self._items, self._topics)}            
         self._good_init = False
         self._stream_time_start = _perf_counter()
         self._cutoff_mktime = 0            
     except:
         self._block.close()
         raise     
     return self
Example #11
    def intersection_product(self, other):

        if not isinstance(other, usetset_abc):
            other = self.__class__(other)

        if self and other:
            return self.__class__({
                self._item_class()(item & other_item)
                for item, other_item in _product(self._items, other._items)
            })
        else:
            return self.__class__()
Example #12
    def _getseen(self, recprecof):
        isseen = dict()
        for genome, _dict in recprecof.items():
            seen = 0
            for binname, (recall, precision) in _dict.items():
                for i, (min_recall, min_precision) in enumerate(
                        _product(self.recalls, self.precisions)):
                    if recall < min_recall:
                        break

                    if precision >= min_precision:
                        seen |= 1 << i
            isseen[genome] = seen
        return isseen
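
A minimal sketch of the bit-vector encoding used above: bit i of `seen` corresponds to the i-th (recall, precision) pair emitted by `_product`, so a genome's integer records every threshold pair it clears.

from itertools import product as _product

recalls, precisions = (0.5, 0.9), (0.5, 0.9)
recall, precision = 0.7, 0.6      # hypothetical bin
seen = 0
for i, (min_recall, min_precision) in enumerate(_product(recalls, precisions)):
    if recall >= min_recall and precision >= min_precision:
        seen |= 1 << i
# seen == 0b0001: only the (0.5, 0.5) threshold pair is cleared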
Example #13
def _type_matcher(argtypes):
    # XXX For long argument lists, the result can get pretty large.
    # It will ensure a very fast lookup at the expense of the
    # dictionary size that is built from the result.  Not sure it
    # matters, though, since it is only used for overloaded functions.
    """Return a generator producing all possible tuples of types that
    will match the specified argtypes.  Here are examples::
    
    >>> import ctypes
    >>> for item in _type_matcher([ctypes.c_char_p]):
    ...     print item
    (<type 'str'>,)
    (<type 'unicode'>,)
    (<type 'NoneType'>,)
    (<class 'ctypes.c_char_p'>,)
    (<class 'ctypes.LP_c_char'>,)
    
    >>> for item in _type_matcher([POINTER(c_long)]):
    ...     print item
    (<class 'ctypes.c_long'>,)
    (<class '__main__.LP_c_long'>,)
    >>>

    >>> for item in _type_matcher([]):
    ...     print item
    ()
    >>>

    >>> for item in _type_matcher([c_long, c_long]):
    ...     print item
    (<type 'int'>, <type 'int'>)
    (<type 'int'>, <type 'long'>)
    (<type 'int'>, <class 'ctypes.c_long'>)
    (<type 'long'>, <type 'int'>)
    (<type 'long'>, <type 'long'>)
    (<type 'long'>, <class 'ctypes.c_long'>)
    (<class 'ctypes.c_long'>, <type 'int'>)
    (<class 'ctypes.c_long'>, <type 'long'>)
    (<class 'ctypes.c_long'>, <class 'ctypes.c_long'>)
    >>>
    """
    result = []
    for tp in argtypes:
        possible = _argtypes_matches.get(tp, None)
        if possible is None:
            if hasattr(tp, "_type_"):
                # ctypes POINTER(tp) does also accept tp instances
                possible = [tp._type_, tp]
        result.append(possible)
    return _product(*result)
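
A minimal sketch of the expansion, with a hypothetical one-entry `_argtypes_matches` table standing in for the module-level mapping the function consults.

import ctypes
from itertools import product as _product

_argtypes_matches = {ctypes.c_long: [int, ctypes.c_long]}  # assumed shape
possible = _argtypes_matches[ctypes.c_long]
list(_product(possible, possible))
# -> [(int, int), (int, c_long), (c_long, int), (c_long, c_long)]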
Example #14
 def supports_any_affordances(self, upstream=None, downstream=None):
     if upstream is not None:
         if downstream is not None:
             return any(self.supports_affordances(one_upstream,
                                                  one_downstream)
                        for one_upstream, one_downstream
                        in _product(upstream, downstream))
         else:
             return any(self.supports_affordances(one_upstream, None)
                        for one_upstream in upstream)
     else:
         if downstream is not None:
             return any(self.supports_affordances(None, one_downstream)
                        for one_downstream in downstream)
         else:
             return True
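
A minimal sketch of the pairing logic above: `_product(upstream, downstream)` tries every upstream/downstream combination, and `any()` stops at the first supported pair.

from itertools import product as _product

upstream, downstream = ['a', 'b'], ['x', 'y']   # hypothetical affordances
list(_product(upstream, downstream))
# -> [('a', 'x'), ('a', 'y'), ('b', 'x'), ('b', 'y')]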
Example #15
    def union_product(self, other):

        if not isinstance(other, usetset_abc):
            other = self.__class__(other)

        if self and other:
            if not self.isfinite or not other.isfinite:
                return self.__class__("*")
            else:
                return self.__class__(
                    {self._item_class()(item | other_item) for item, other_item in _product(self._items, other._items)}
                )
        elif self:
            return self
        else:
            return other
Example #16
        def range(_self, start, stop=None):
            '''
            range([start,] stop) -> itertools.product object
            Returns an iterator that generates the sizes in the range on demand as tuples.

            '''
            if stop is None:
                stop = start
                start = tuple(0 for i in _self)
            if len(_self) != len(start):
                raise TypeError('Expected %d elements in start, got %d' %
                                (len(_self), len(start)))
            if len(_self) != len(stop):
                raise TypeError('Expected %d elements in stop, got %d' %
                                (len(_self), len(stop)))
            return _product(*(range(mn, mx) for mn, mx in zip(start, stop)))
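
A minimal usage sketch, assuming `_self` is a 2-tuple of sizes: the returned iterator walks the index grid in row-major order, exactly like nested loops.

from itertools import product as _product

start, stop = (0, 0), (2, 3)
list(_product(*(range(mn, mx) for mn, mx in zip(start, stop))))
# -> [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]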
Example #17
    def _getseen(self, recprecof):
        """Make a {genome: isseen} dict, where isseen is a boolean vector
        (implemented as an integer), 1 if a genome is seen at that recall, prec level,
        0 otherwise
        """
        isseen = dict()
        for genome, _dict in recprecof.items():
            seen = 0
            for binname, (recall, precision) in _dict.items():
                for i, (min_recall, min_precision) in enumerate(_product(self.recalls, self.precisions)):
                    if recall < min_recall:
                        break

                    if precision >= min_precision:
                        seen |= 1 << i
            isseen[genome] = seen
        return isseen
Example #18
    def union_product(self, other):

        if not isinstance(other, usetset_abc):
            other = self.__class__(other)

        if self and other:
            if not self.isfinite or not other.isfinite:
                return self.__class__('*')
            else:
                return self.__class__({
                    self._item_class()(item | other_item)
                    for item, other_item in _product(self._items, other._items)
                })
        elif self:
            return self
        else:
            return other
Example #19
def interpolBA(params, ctrlarr, lparams, minfo, models, param=True):
    """ Interpola os `modelos` para os parametros `params` 

    | -params = from emcee minimization
    | -ctrlarr = the fixed value of M, ob(W), Z, H, sig, Rd, h, *n*, cos(i).
    |            If it is not fixed, use np.NaN.
    | -Parametric disk model default (`param` == True).

    This function always returns a valid result (i.e., extrapolations from the
    nearest values are always on).

    If it is a 'Non-squared grid' (asymmetric), it will return a zero array if
    a given model is not found.
    """
    nq = 9
    if not param:
        nq = 8
    if len(ctrlarr) != nq:
        raise ValueError('# Wrong ctrlarr format!!')
    params = params[:_np.sum(_np.isnan(ctrlarr))]
    nlb = len(models[0])
    outmodels = _np.empty((2**len(params), nlb))
    mod = BAmod('')
    parlims = _np.zeros((len(params), 2))
    j = 0
    for i in range(nq):
        if _np.isnan(ctrlarr[i]):
            parlims[j] = [
                _phc.find_nearest(lparams[i], params[j], bigger=False),
                _phc.find_nearest(lparams[i], params[j], bigger=True)
            ]
            j += 1
    j = 0
    for prod in _product(*parlims):
        allpars = _np.array(ctrlarr)
        idx = _np.isnan(allpars)
        allpars[idx] = prod
        mod.build(allpars, lparams)
        idx = mod.getidx(minfo)
        if _np.sum(idx) == 0:
            return _np.zeros(nlb)
        outmodels[j] = models[idx]
        j += 1
    X0 = parlims[:, 0]
    X1 = parlims[:, 1]
    return _phc.interLinND(params, X0, X1, outmodels)
Example #20
def interpolBA(params, ctrlarr, lparams, minfo, models, param=True):
    """ Interpola os `modelos` para os parametros `params` 

    | -params = from emcee minimization
    | -ctrlarr = the fixed value of M, ob(W), Z, H, sig, Rd, h, *n*, cos(i).
    |            If it is not fixed, use np.NaN.
    | -Parametric disk model default (`param` == True).

    This function always returns a valid result (i.e., extrapolations from the
    nearest values are always on).

    If it is a 'Non-squared grid' (asymmetric), it will return a zero array if
    a given model is not found.
    """
    nq = 9
    if not param:
        nq = 8
    if len(ctrlarr) != nq:
        raise ValueError('# Wrong ctrlarr format!!')
    params = params[:_np.sum(_np.isnan(ctrlarr))]
    nlb = len(models[0])
    outmodels = _np.empty((2**len(params), nlb))
    mod = BAmod('')
    parlims = _np.zeros((len(params), 2))
    j = 0
    for i in range(nq):
        if _np.isnan(ctrlarr[i]):
            parlims[j] = [
                _phc.find_nearest(lparams[i], params[j], bigger=False),
                _phc.find_nearest(lparams[i], params[j], bigger=True)
            ]
            j += 1
    j = 0
    for prod in _product(*parlims):
        allpars = _np.array(ctrlarr)
        idx = _np.isnan(allpars)
        allpars[idx] = prod
        mod.build(allpars, lparams)
        idx = mod.getidx(minfo)
        if _np.sum(idx) == 0:
            return _np.zeros(nlb)
        outmodels[j] = models[idx]
        j += 1
    X0 = parlims[:, 0]
    X1 = parlims[:, 1]
    return _phc.interLinND(params, X0, X1, outmodels)
Example #21
def check_xdr_limits(xdrminfo, todel=[]):
    """ Check if the XDR file contains models for all parameters within their
    **maximum** interval.

    `todel`: list of dimensions to be skipped in the test.
    `-1` is usually the cosine of the observer's inclination angle.
    If `dim=-1` has values from 0 to 1, it is automatically skipped.
    """
    todel = list(todel)
    notdel = []
    for i in range(len(xdrminfo[0])):
        lvals = _np.unique(xdrminfo[:, i])
        if len(lvals) == 1 or (i == len(xdrminfo[0]) - 1
                               and _np.min(lvals) >= 0
                               and _np.max(lvals) <= 1):
            print('# Skipping dimension {} of the models!!'.format(i))
            todel.append(i)
        else:
            notdel.append(i)

    minfo = _np.delete(xdrminfo, todel, axis=1)

    # Update the list of parameters and intervals
    listpar = []
    for i in range(len(minfo[0])):
        arr = _np.unique(minfo[:, i])
        listpar.append(arr)

    lim_chk = [[_np.min(i), _np.max(i)] for i in listpar]
    i = 0
    for pars in _product(*lim_chk):
        tidx = _np.where((_np.array(pars) == minfo).all(axis=1))
        if _np.size(tidx) == 0:
            print('# IMPORTANT MODEL NOT FOUND:')
            print(pars)
            i += 1
    if i > 0:
        print("# TOTAL OF {} MODELS WEREN'T FOUND!!".format(i))
    else:
        print('# THE MODELS IN XDR ARE OKAY!!')
    return
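
A minimal sketch of the corner check above: taking [min, max] per remaining dimension and expanding with `_product` enumerates the 2**d corners of the grid, each of which must appear in the model table.

from itertools import product as _product

lim_chk = [[0.0, 1.0], [10.0, 20.0]]   # hypothetical two-parameter limits
list(_product(*lim_chk))
# -> [(0.0, 10.0), (0.0, 20.0), (1.0, 10.0), (1.0, 20.0)]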
Example #22
def check_xdr_limits(xdrminfo, todel=[]):
    """ Check if the XDR file contains models for all parameters within their
    **maximum** interval.

    `todel`: list of dimensions to be skipped in the test.
    `-1` is usually the cosine of the observer's inclination angle.
    If `dim=-1` has values from 0 to 1, it is automatically skipped.
    """
    todel = list(todel)
    notdel = []               
    for i in range(len(xdrminfo[0])):
        lvals = _np.unique(xdrminfo[:, i])
        if len(lvals) == 1 or (i == len(xdrminfo[0])-1 and 
                _np.min(lvals) >= 0 and _np.max(lvals) <= 1):
            print('# Skipping dimension {} of the models!!'.format(i))
            todel.append(i)
        else:
            notdel.append(i)

    minfo = _np.delete(xdrminfo, todel, axis=1)

    # Update the list of parameters and intervals
    listpar = []
    for i in range(len(minfo[0])):
        arr = _np.unique(minfo[:, i])
        listpar.append(arr)

    lim_chk = [[_np.min(i), _np.max(i)] for i in listpar]
    i = 0
    for pars in _product(*lim_chk):
        tidx = _np.where((_np.array(pars) == minfo).all(axis=1))
        if _np.size(tidx) == 0:
            print('# IMPORTANT MODEL NOT FOUND:')
            print(pars)
            i += 1
    if i > 0:
        print("# TOTAL OF {} MODELS WEREN'T FOUND!!".format(i))
    else:
        print('# THE MODELS IN XDR ARE OKAY!!')
    return 
Example #23
def subresonances(rgr):
    """ this graph and its lower-spin (more pi-bonded) resonances
    """
    def _inc_range(bnd_cap):
        return tuple(range(0, bnd_cap + 1))

    add_pi_bonds_ = _partial(_add_pi_bonds, rgr)

    atm_keys = list(_atom_keys(rgr))
    bnd_keys = list(_bond_keys(rgr))
    atm_rad_vlcs = _values_by_key(atom_radical_valences(rgr), atm_keys)
    atm_bnd_keys_lst = _values_by_key(_atom_bond_keys(rgr), atm_keys)
    bnd_caps = _values_by_key(_bond_capacities(rgr), bnd_keys)
    bnd_ord_dct = _bond_orders(rgr)

    def _is_valid(bnd_ord_inc_dct):
        # check if radical decrements are more than radical valences
        def __tally(atm_bnd_keys):
            return sum(_values_by_key(bnd_ord_inc_dct, atm_bnd_keys))

        atm_rad_vlc_decs = tuple(map(__tally, atm_bnd_keys_lst))
        enough_elecs = numpy.all(
            numpy.less_equal(atm_rad_vlc_decs, atm_rad_vlcs))
        # check if all bond orders are less than 4 (should only affect C2)
        bnd_inc_keys = bnd_ord_inc_dct.keys()
        bnd_incs = _values_by_key(bnd_ord_inc_dct, bnd_inc_keys)
        bnd_ords = _values_by_key(bnd_ord_dct, bnd_inc_keys)
        new_bnd_ords = numpy.add(bnd_ords, bnd_incs)
        not_too_many = numpy.all(numpy.less(new_bnd_ords, 4))
        return enough_elecs and not_too_many

    def _bond_value_dictionary(bnd_vals):
        return dict(zip(bnd_keys, bnd_vals))

    bnd_ord_incs_itr = _product(*map(_inc_range, bnd_caps))
    bnd_ord_inc_dct_itr = map(_bond_value_dictionary, bnd_ord_incs_itr)
    bnd_ord_inc_dct_itr = filter(_is_valid, bnd_ord_inc_dct_itr)
    rgrs = tuple(sorted(map(add_pi_bonds_, bnd_ord_inc_dct_itr), key=_frozen))
    return rgrs
Example #24
def _get_all_parameters_combinations(parameters):
    """
    Takes a dictionary where the keys are parameter names. The value of a key
    is a list of all possible values for that parameter.

    Returns a list of all possible parameter combinations. Each parameter set
    is a dictionary.

    For example an input of {'foo':[1,2], 'bar': {'x': ['a','b']}}
    will produce
    [{'foo':1, 'bar':{'x': 'a'}}, {'foo':1, 'bar':{'x': 'b'}},
     {'foo':2, 'bar':{'x': 'a'}}, {'foo':2, 'bar': {'x': 'b'}}]
    """

    # Get all possible combinations
    parameter_names = list(parameters.keys())
    arg_list = []
    for i in parameter_names:
        val = parameters[i]
        if isinstance(val, dict):
            arg_list.append(_get_all_parameters_combinations(val))
        elif not isinstance(val, list):
            arg_list.append([val])
        else:
            arg_list.append(val)
    param_iter = _product(*arg_list)

    # Construct the output
    result = []
    for param_tuple in param_iter:
        param_dict = {}
        for i in range(len(param_tuple)):
            cur_arg_name = parameter_names[i]
            cur_arg_value = param_tuple[i]
            param_dict[cur_arg_name] = cur_arg_value
        result.append(param_dict)

    return result
Example #25
def _get_all_parameters_combinations(parameters):
    """
    Takes a dictionary where the keys are parameter names. The value of a key
    is a list of all possible values for that parameter.

    Returns a list of all possible parameter combinations. Each parameter set
    is a dictionary.

    For example an input of {'foo':[1,2], 'bar': {'x': ['a','b']}}
    will produce
    [{'foo':1, 'bar':{'x': 'a'}}, {'foo':1, 'bar':{'x': 'b'}},
     {'foo':2, 'bar':{'x': 'a'}}, {'foo':2, 'bar': {'x': 'b'}}]
    """

    # Get all possible combinations
    parameter_names = list(parameters.keys())
    arg_list = []
    for i in parameter_names:
        val = parameters[i]
        if isinstance(val, dict):
            arg_list.append(_get_all_parameters_combinations(val))
        elif not isinstance(val, list):
            arg_list.append([val])
        else:
            arg_list.append(val)
    param_iter = _product(*arg_list)

    # Construct the output
    result = []
    for param_tuple in param_iter:
        param_dict = {}
        for i in range(len(param_tuple)):
            cur_arg_name = parameter_names[i]
            cur_arg_value = param_tuple[i]
            param_dict[cur_arg_name] = cur_arg_value
        result.append(param_dict)

    return result
Example #26
def get_all_states(start, end, length):
    odds_gen = _get_odds_cb_gen(start, end, length)
    evens_gen = _get_evens_cb_gen(start, end, length)

    return _product(odds_gen, evens_gen)
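
Worth noting for the snippet above: `itertools.product` materializes each input iterable into a tuple before yielding anything, so both generators are consumed up front; only the emission of the pairs is lazy.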
Example #27
def _generalised_dot(a, b, shape_out):
    return SymbolicArray(
        [(a[indices] * b).sum()
         for indices in _product(*[range(s) for s in shape_out])], shape_out)
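
A minimal sketch of the index enumeration above: `_product` over per-axis ranges visits every multi-index of `shape_out` in row-major order.

from itertools import product as _product

shape_out = (2, 2)
list(_product(*[range(s) for s in shape_out]))
# -> [(0, 0), (0, 1), (1, 0), (1, 1)]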
Example #28
def cartesian_product(*iterables):
    # TODO enable overriding for vectors and matrices
    return Set(list(_product(*[tuple(i) for i in iterables])))
Example #29
def segments_intersections(segments: _Sequence[_Segment],
                           *,
                           context: _Optional[_Context] = None
                           ) -> _Dict[_Tuple[int, int], _Intersection]:
    """
    Returns mapping between intersection points
    and corresponding segments indices.

    Based on Bentley-Ottmann algorithm.

    Time complexity:
        ``O(len(segments) * log len(segments) + len(intersections))``
    Memory complexity:
        ``O(len(segments) + len(intersections))``
    Reference:
        https://en.wikipedia.org/wiki/Bentley%E2%80%93Ottmann_algorithm

    :param segments: sequence of segments.
    :param context: geometrical context.
    :returns:
        mapping between intersection points and corresponding segments indices.

    >>> from ground.base import get_context
    >>> context = get_context()
    >>> Point, Segment = context.point_cls, context.segment_cls
    >>> segments_intersections([]) == {}
    True
    >>> segments_intersections([Segment(Point(0, 0), Point(2, 2))]) == {}
    True
    >>> segments_intersections([Segment(Point(0, 0), Point(2, 0)),
    ...                         Segment(Point(0, 2), Point(2, 2))]) == {}
    True
    >>> (segments_intersections([Segment(Point(0, 0), Point(2, 2)),
    ...                          Segment(Point(0, 0), Point(2, 2))])
    ...  == {(0, 1): (Point(0, 0), Point(2, 2))})
    True
    >>> (segments_intersections([Segment(Point(0, 0), Point(2, 2)),
    ...                          Segment(Point(2, 0), Point(0, 2))])
    ...  == {(0, 1): (Point(1, 1),)})
    True
    """
    left_parts_ids, right_parts_ids = {}, {}
    left_tangents, right_tangents = {}, {}
    for event in _sweep(
            segments, context=_get_context() if context is None else context):
        if event.tangents:
            (left_tangents.setdefault(event.start, {}).setdefault(
                event.end,
                set()).update(tangent.end for tangent in event.tangents))
        if event.right.tangents:
            (right_tangents.setdefault(event.end, {}).setdefault(
                event.start,
                set()).update(tangent.end for tangent in event.right.tangents))
        for start, ends_ids in event.parts_ids.items():
            for end, ids in ends_ids.items():
                (left_parts_ids.setdefault(start,
                                           {}).setdefault(end,
                                                          set()).update(ids))
                (right_parts_ids.setdefault(end,
                                            {}).setdefault(start,
                                                           set()).update(ids))
    discrete = {}  # type: _Dict[_Tuple[int, int], _Tuple[_Point]]
    for intersection_point, ends_tangents_ends in left_tangents.items():
        left_intersection_point_ids, right_intersection_point_ids = (
            left_parts_ids.get(intersection_point),
            right_parts_ids.get(intersection_point))
        for end, tangents_ends in ends_tangents_ends.items():
            ids = left_intersection_point_ids[end]
            for tangent_end in tangents_ends:
                tangent_ids = (left_intersection_point_ids[tangent_end]
                               if intersection_point < tangent_end else
                               right_intersection_point_ids[tangent_end])
                ids_pairs = [
                    _to_sorted_pair(id_, tangent_id)
                    for id_, tangent_id in _product(ids - tangent_ids,
                                                    tangent_ids - ids)
                ]
                discrete.update(zip(ids_pairs, _repeat(
                    (intersection_point, ))))
    for intersection_point, starts_tangents_ends in right_tangents.items():
        left_intersection_point_ids, right_intersection_point_ids = (
            left_parts_ids.get(intersection_point),
            right_parts_ids.get(intersection_point))
        for start, tangents_ends in starts_tangents_ends.items():
            ids = right_intersection_point_ids[start]
            for tangent_end in tangents_ends:
                tangent_ids = (left_intersection_point_ids[tangent_end]
                               if intersection_point < tangent_end else
                               right_intersection_point_ids[tangent_end])
                ids_pairs = [
                    _to_sorted_pair(id_, tangent_id)
                    for id_, tangent_id in _product(ids - tangent_ids,
                                                    tangent_ids - ids)
                ]
                discrete.update(zip(ids_pairs, _repeat(
                    (intersection_point, ))))
    continuous = {}  # type: _Dict[_Tuple[int, int], _Tuple[_Point, _Point]]
    for start, ends_ids in left_parts_ids.items():
        for end, ids in ends_ids.items():
            for ids_pair in _to_pairs_combinations(sorted(ids)):
                if ids_pair in continuous:
                    prev_start, prev_end = continuous[ids_pair]
                    endpoints = min(prev_start, start), max(prev_end, end)
                else:
                    endpoints = (start, end)
                continuous[ids_pair] = endpoints
    return {**discrete, **continuous}
Example #30
def contour_self_intersects(contour: _Contour,
                            *,
                            context: _Optional[_Context] = None) -> bool:
    """
    Checks if contour has self-intersection.

    Based on Bentley-Ottmann algorithm.

    Time complexity:
        ``O(len(contour.vertices) * log len(contour.vertices))``
    Memory complexity:
        ``O(len(contour.vertices))``
    Reference:
        https://en.wikipedia.org/wiki/Sweep_line_algorithm

    :param contour: contour to check.
    :param context: geometrical context.
    :returns: true if contour is self-intersecting, false otherwise.

    .. note::
        Consecutive equal vertices like ``Point(2, 0)`` in

        .. code-block:: python

            Contour([Point(0, 0), Point(2, 0), Point(2, 0), Point(2, 2)])

        will be considered as self-intersection,
        if you don't want them to be treated as such
        -- filter out before passing as argument.

    >>> from ground.base import get_context
    >>> context = get_context()
    >>> Contour, Point = context.contour_cls, context.point_cls
    >>> contour_self_intersects(Contour([Point(0, 0), Point(2, 0),
    ...                                  Point(2, 2)]))
    False
    >>> contour_self_intersects(Contour([Point(0, 0), Point(2, 0),
    ...                                  Point(1, 0)]))
    True
    """
    vertices = contour.vertices
    if len(vertices) < 3:
        raise ValueError(
            'Contour {contour} is degenerate.'.format(contour=contour))
    if not _all_unique(vertices):
        return True
    if context is None:
        context = _get_context()
    segments = context.contour_segments(contour)

    def non_neighbours_disjoint(
            segment_id: int,
            other_segment_id: int,
            last_segment_id: int = len(segments) - 1) -> bool:
        min_edge_id, max_edge_id = _to_sorted_pair(segment_id,
                                                   other_segment_id)
        return (max_edge_id - min_edge_id == 1
                or (min_edge_id == 0 and max_edge_id == last_segment_id))

    return not all(
        event.has_only_relations(_Relation.DISJOINT, _Relation.TOUCH) and all(
            non_neighbours_disjoint(id_, other_id)
            for tangent in [*event.tangents, *event.right.tangents] for id_,
            other_id in _product(event.segments_ids, tangent.segments_ids))
        for event in _sweep(segments, context=context))
Example #31
def surface_elevation(S,
                      time_index,
                      seed=123,
                      frequency_bins=None,
                      phases=None):
    """
    Calculates wave elevation time-series from spectrum
    
    Parameters
    ------------    
    S: pandas DataFrame
        Spectral density [m^2/Hz] indexed by frequency [Hz]
    time_index: numpy array
        Time used to create the wave elevation time-series [s],
        for example, time = np.arange(0,100,0.01)
    seed: int (optional)
        Random seed
    frequency_bins: numpy array or pandas Series (optional)
        Bin widths for frequency of S. Required for unevenly sized bins
    phases: numpy array or pandas DataFrame (optional)
        Explicit phases for frequency components (overrides seed)
        for example, phases = np.random.rand(len(S)) * 2 * np.pi
        
    Returns
    ---------
    eta: pandas DataFrame
        Wave surface elevation [m] indexed by time [s]
    
    """
    try:
        time_index = np.array(time_index)
    except:
        pass
    assert isinstance(S, pd.DataFrame), 'S must be of type pd.DataFrame'
    assert isinstance(time_index, np.ndarray), ('time_index must be of type '
                                                'np.ndarray')
    assert isinstance(seed, (type(None), int)), 'seed must be of type int'
    assert isinstance(
        frequency_bins, (type(None), np.ndarray, pd.DataFrame)), (
            "frequency_bins must be of type None, np.ndarray, or pd.DataFrame")
    assert isinstance(phases, (type(None), np.ndarray, pd.DataFrame)), (
        'phases must be of type None, np.ndarray, or pd.DataFrame')

    if frequency_bins is not None:
        assert frequency_bins.squeeze().shape == S.squeeze().shape, (
            'shape of frequency_bins must match shape of S')
    if phases is not None:
        assert phases.squeeze().shape == S.squeeze().shape, (
            'shape of phases must match shape of S')

    start_time = time_index[0]
    end_time = time_index[-1]

    f = pd.Series(S.index)
    f.index = f

    if frequency_bins is None:
        delta_f = f.diff()
        #delta_f[0] = f[1]-f[0]

    elif isinstance(frequency_bins, np.ndarray):
        delta_f = pd.Series(frequency_bins, index=S.index)
    elif isinstance(frequency_bins, pd.DataFrame):
        assert len(frequency_bins.columns) == 1, ('frequency_bins must only '
                                                  'contain 1 column')
        delta_f = frequency_bins.squeeze()

    if phases is None:
        np.random.seed(seed)
        phase = pd.DataFrame(2 * np.pi *
                             np.random.rand(S.shape[0], S.shape[1]),
                             index=S.index,
                             columns=S.columns)
    elif isinstance(phases, np.ndarray):
        phase = pd.DataFrame(phases, index=S.index, columns=S.columns)
    elif isinstance(phases, pd.DataFrame):
        phase = phases

    phase = phase[start_time:end_time]  # Should phase, omega, and A*delta_f be
    #   truncated before computation?

    omega = pd.Series(2 * np.pi * f)
    omega.index = f
    omega = omega[start_time:end_time]

    # Wave amplitude times delta f, truncated
    A = 2 * S
    A = A.multiply(delta_f, axis=0)
    A = np.sqrt(A)
    A = A.loc[start_time:end_time, :]

    # Product of omega and time (identical for every column, so computed once)
    B = np.array([x * y for x, y in _product(time_index, omega)])
    B = B.reshape((len(time_index), len(omega)))
    B = pd.DataFrame(B, index=time_index, columns=omega.index)

    eta = pd.DataFrame(columns=S.columns, index=time_index)
    for mcol in eta.columns:
        C = np.real(np.exp(1j * (B + phase[mcol])))
        C = pd.DataFrame(C, index=time_index, columns=omega.index)

        eta[mcol] = (C * A[mcol]).sum(axis=1)

    return eta
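
A minimal sketch of the `B` construction above: the flat product of times and angular frequencies, reshaped, is just the outer product of the two vectors.

import numpy as np
from itertools import product as _product

t = np.array([0.0, 0.1])
w = np.array([1.0, 2.0, 3.0])
B = np.array([x * y for x, y in _product(t, w)]).reshape((len(t), len(w)))
assert np.allclose(B, np.outer(t, w))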
Example #32
def point_modal_velocity(omega, x0, grid, L, *, N=None, deltan=0, c=None):
    """Velocity of point source in a rectangular room using a modal room model.

    Parameters
    ----------
    omega : float
        Frequency of source.
    x0 : (3,) array_like
        Position of source.
    grid : triple of array_like
        The grid that is used for the sound field calculations.
        See `sfs.util.xyz_grid()`.
    L : (3,) array_like
        Dimensions of the rectangular room.
    N : (3,) array_like or int, optional
        Combination of modal orders in the three spatial dimensions to
        calculate the sound field for, or maximum order for all
        dimensions.  If not given, the maximum modal order is
        approximately determined and the sound field is computed up to
        this maximum order.
    deltan : float, optional
        Absorption coefficient of the walls.
    c : float, optional
        Speed of sound.

    Returns
    -------
    `XyzComponents`
        Particle velocity at positions given by *grid*.

    """
    k = _util.wavenumber(omega, c)
    x0 = _util.asarray_1d(x0)
    x, y, z = _util.as_xyz_components(grid)

    if N is None:
        # determine maximum modal order per dimension
        Nx = int(_np.ceil(L[0] / _np.pi * k))
        Ny = int(_np.ceil(L[1] / _np.pi * k))
        Nz = int(_np.ceil(L[2] / _np.pi * k))
        mm = range(Nx)
        nn = range(Ny)
        ll = range(Nz)
    elif _np.isscalar(N):
        # compute up to a given order
        mm = range(N)
        nn = range(N)
        ll = range(N)
    else:
        # compute field for one order combination only
        mm = [N[0]]
        nn = [N[1]]
        ll = [N[2]]

    kmp0 = [((kx + 1j * deltan)**2, _np.sin(kx * x) * _np.cos(kx * x0[0]))
            for kx in [m * _np.pi / L[0] for m in mm]]
    kmp1 = [((ky + 1j * deltan)**2, _np.sin(ky * y) * _np.cos(ky * x0[1]))
            for ky in [n * _np.pi / L[1] for n in nn]]
    kmp2 = [((kz + 1j * deltan)**2, _np.sin(kz * z) * _np.cos(kz * x0[2]))
            for kz in [l * _np.pi / L[2] for l in ll]]
    ksquared = k**2
    vx = 0+0j
    vy = 0+0j
    vz = 0+0j
    for (km0, p0), (km1, p1), (km2, p2) in _product(kmp0, kmp1, kmp2):
        km = km0 + km1 + km2
        vx = vx - 8*1j / (ksquared - km) * p0
        vy = vy - 8*1j / (ksquared - km) * p1
        vz = vz - 8*1j / (ksquared - km) * p2
    return _util.XyzComponents([vx, vy, vz])
Example #33
def _get_combinations(extrema):
    Q_list = [tuple(range(d[0], d[1] + 1)) for d in extrema]
    return set(_product(*Q_list))
Example #34
def point_modal(omega, x0, grid, L, *, N=None, deltan=0, c=None):
    """Point source in a rectangular room using a modal room model.

    Parameters
    ----------
    omega : float
        Frequency of source.
    x0 : (3,) array_like
        Position of source.
    grid : triple of array_like
        The grid that is used for the sound field calculations.
        See `sfs.util.xyz_grid()`.
    L : (3,) array_like
        Dimensions of the rectangular room.
    N : (3,) array_like or int, optional
        Maximum modal order per spatial dimension, or a list of orders.
        A scalar applies to all three dimensions. If no order is
        provided, it is approximately determined.
    deltan : float, optional
        Absorption coefficient of the walls.
    c : float, optional
        Speed of sound.

    Returns
    -------
    numpy.ndarray
        Sound pressure at positions given by *grid*.

    """
    k = _util.wavenumber(omega, c)
    x0 = _util.asarray_1d(x0)
    x, y, z = _util.as_xyz_components(grid)

    if _np.isscalar(N):
        N = N * _np.ones(3, dtype=int)

    if N is None:
        N = [None, None, None]

    orders = [0, 0, 0]
    for i in range(3):
        if N[i] is None:
            # compute max order
            orders[i] = range(int(_np.ceil(L[i] / _np.pi * k) + 1))
        elif _np.isscalar(N[i]):
            # use given max order
            orders[i] = range(N[i] + 1)
        else:
            # use given orders
            orders[i] = N[i]

    kmp0 = [((kx + 1j * deltan)**2, _np.cos(kx * x) * _np.cos(kx * x0[0]))
            for kx in [m * _np.pi / L[0] for m in orders[0]]]
    kmp1 = [((ky + 1j * deltan)**2, _np.cos(ky * y) * _np.cos(ky * x0[1]))
            for ky in [n * _np.pi / L[1] for n in orders[1]]]
    kmp2 = [((kz + 1j * deltan)**2, _np.cos(kz * z) * _np.cos(kz * x0[2]))
            for kz in [l * _np.pi / L[2] for l in orders[2]]]
    ksquared = k**2
    p = 0
    for (km0, p0), (km1, p1), (km2, p2) in _product(kmp0, kmp1, kmp2):
        km = km0 + km1 + km2
        p = p + 8 / (ksquared - km) * p0 * p1 * p2
    return p
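
A minimal sketch of the triple sum above: `_product(kmp0, kmp1, kmp2)` pairs every (m, n, l) mode combination exactly once, so the loop accumulates len(kmp0) * len(kmp1) * len(kmp2) terms.

from itertools import product as _product

mm, nn, ll = range(2), range(3), range(4)   # hypothetical modal orders
assert len(list(_product(mm, nn, ll))) == 2 * 3 * 4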
Example #35
from collections import namedtuple as _namedtuple
from itertools import product as _product
import sys as _sys

__author__ = "Antony Lee"
__version__ = "0.2"
__fullname__ = "redeal v. {}".format(__version__)
__copyright__ = "{}, (c) {}".format(__fullname__, __author__)

SEATS = list("NESW")
LONG_SEATS = ["North", "East", "South", "West"]
N_SEATS = len(SEATS)
SUITS = list("SHDC")
STRAINS = SUITS + ["N"]
SUITS_SYM_UNICODE = list("♠♡♢♣")
SUITS_SYM = (list("♠♡♢♣") if _sys.getdefaultencoding() == "utf-8" else
             [" S", " H", " D", " C"])
N_SUITS = len(SUITS)
RANKS = list("AKQJT98765432")
HCP = list(map(int, "4321000000000"))
QP = list(map(int, "3210000000000"))
PER_SUIT = 13
Card = _namedtuple("Card", ["suit", "rank"])
Card.from_str = lambda s: Card(SUITS.index(s[0].upper()),
                               RANKS.index(s[1].upper()))
Card.__str__ = lambda self: SUITS_SYM[self.suit] + RANKS[self.rank]
FULL_DECK = {
    Card(suit=suit, rank=rank)
    for suit, rank in _product(range(N_SUITS), range(PER_SUIT))
}
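
A quick sanity check of the deck construction above, run in the module's own namespace: the product of 4 suits and 13 ranks gives the full 52-card deck.

assert len(FULL_DECK) == N_SUITS * PER_SUIT == 52
assert str(Card.from_str("sa")) == SUITS_SYM[0] + "A"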
Example #36
# vim: set fileencoding=utf-8
from __future__ import division, print_function, unicode_literals
from collections import namedtuple as _namedtuple
from itertools import product as _product
import sys as _sys


__author__ = "Antony Lee"
__version__ = "0.2"
__fullname__ = "redeal v. {}".format(__version__)
__copyright__ = "{}, (c) {}".format(__fullname__, __author__)

SEATS = list("NESW")
LONG_SEATS = ["North", "East", "South", "West"]
N_SEATS = len(SEATS)
SUITS = list("SHDC")
STRAINS = SUITS + ["N"]
SUITS_SYM_UNICODE = list("♠♡♢♣")
SUITS_SYM = (list("♠♡♢♣") if _sys.getdefaultencoding() == "utf-8"
             else [" S", " H", " D", " C"])
N_SUITS = len(SUITS)
RANKS = list("AKQJT98765432")
PER_SUIT = 13
Card = _namedtuple("Card", ["suit", "rank"])
Card.from_str = lambda s: Card(SUITS.index(s[0].upper()),
                               RANKS.index(s[1].upper()))
Card.__str__ = lambda self: SUITS_SYM[self.suit] + RANKS[self.rank]
Card.__format__ = lambda self, fmt: format(str(self), fmt)
FULL_DECK = {Card(suit=suit, rank=rank)
             for suit, rank in _product(range(N_SUITS), range(PER_SUIT))}
Example #37
def surface_elevation(S, time_index, seed=123):
    """
    Calculates wave elevation time-series from spectrum using a random phase
    
    Parameters
    ------------    
    S: pandas DataFrame
        Spectral density [m^2/Hz] indexed by frequency [Hz]
    time_index: numpy array
        Time used to create the wave elevation time-series [s],
        for example, time = np.arange(0,100,0.01)
    seed: int (optional)
        Random seed
        
    Returns
    ---------
    eta: pandas DataFrame
        Wave surface elevation [m] indexed by time [s]
    
    """
    try:
        time_index = np.array(time_index)
    except:
        pass
    assert isinstance(S, pd.DataFrame), 'S must be of type pd.DataFrame'
    assert isinstance(time_index,
                      np.ndarray), 'time_index must be of type np.ndarray'
    assert isinstance(seed, int), 'seed must be of type int'

    np.random.seed(seed)

    start_time = time_index[0]
    end_time = time_index[-1]

    f = pd.Series(S.index)  # frequency
    f.index = f
    delta_f = f.diff()

    phase = pd.Series(2 * np.pi * np.random.rand(f.size))
    phase.index = f
    phase = phase[start_time:end_time]  # Should phase, omega, and A*delta_f be
    #   truncated before computation?
    omega = pd.Series(2 * np.pi * f)  # angular frequency
    omega.index = f
    omega = omega[start_time:end_time]

    # Wave amplitude times delta f, truncated
    A = 2 * S
    A = A.multiply(delta_f, axis=0)
    A = np.sqrt(A)
    A = A.loc[start_time:end_time, :]

    # Product of omega and time
    B = np.array([x * y for x, y in _product(time_index, omega)])
    B = B.reshape((len(time_index), len(omega)))
    B = pd.DataFrame(B, index=time_index, columns=omega.index)

    C = np.real(np.exp(1j * (B + phase)))
    C = pd.DataFrame(C, index=time_index, columns=omega.index)

    eta = pd.DataFrame(columns=S.columns, index=time_index)
    for col in A.columns:
        eta[col] = (C * A[col]).sum(axis=1)

    return eta