Example #1
def fusionMur(piece,obstacles,seuil):
    """
    Merges the obstacles that are too close to the room walls, according to the threshold.

    Returns the new walls and the new obstacles.

    :param piece: Walls of the room
    :type piece: numpy.array
    :param obstacles: List of obstacles to merge
    :type obstacles: list
    :param seuil: Distance to respect
    :type seuil: float
    :rtype: list,list
    """
    newobstacles = []
    cache = []
    newpiece = cp(piece)

    for obs in obstacles:
        distmin,indminobs,indminpiece = inddistMin(obs,newpiece)
        if distmin<2*seuil:
            pc = newpiece.tolist()
            first = cp(pc[:indminpiece+1])
            last = cp(pc[indminpiece:])
            o = obs.tolist()
            line = genLigne(obs[indminobs],pc[indminpiece],5)
            first.extend(o)
            first.extend(line.tolist())
            first.extend(last)
            newpiece = np.array(first)
        else:
            newobstacles.append(cp(obs))
    return newpiece,newobstacles
Example #2
def get_source(name, version=None, copy=False):
    """Retrieve a Source from the registry by name.

    Parameters
    ----------
    name : str
        Name of source in the registry.
    version : str, optional
        Version identifier for sources with multiple versions. Default is
        `None` which corresponds to the latest, or only, version.
    copy : bool, optional
        If True and if `name` is already a Source instance, return a copy of
        it. (If `name` is a str a copy of the instance
        in the registry is always returned, regardless of the value of this
        parameter.) Default is False.
    """
    
    # If we need to retrieve from the registry, we want to return a shallow
    # copy, in order to keep the copy in the registry "pristine". However, we
    # *don't* want a shallow copy otherwise. Therefore, we need to check if
    # `name` is already an instance of Source before going to the registry,
    # so we know whether or not to make a shallow copy.
    if isinstance(name, Source):
        if copy:
            return cp(name)
        else:
            return name
    else:
        return cp(registry.retrieve(Source, name, version=version))
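
The registry branch above returns a shallow copy so that the instance stored in the registry stays pristine. As a stand-alone illustration of that shallow-copy behaviour (the Source class below is a minimal stand-in, not the real one): copy.copy creates a new top-level object while nested mutable attributes remain shared.

from copy import copy as cp

class Source:
    """Minimal stand-in for a registry entry (for illustration only)."""
    def __init__(self, name, params):
        self.name = name
        self.params = params  # mutable attribute

registry_entry = Source("example-source", {"z": 0.0})
clone = cp(registry_entry)
print(clone is registry_entry)                 # False: new top-level object
print(clone.params is registry_entry.params)   # True: nested data still shared
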
Example #3
def calculerLigneAgr(ligne,formes,pointsdefo,seuil):
    """
    Function used to compute the path to follow, using enlarged obstacles.

    As long as the line passes through an enlarged obstacle, a deformation target is defined for that obstacle and the deformation algorithm is applied.

    If too many deformations are performed for a single shape (limited to 5 by default), we stop. To change this limit, modify line 208.

    The parameters of the deformation algorithm can be changed by modifying lines 226 and 230 of the file (default: newl = algo.algo(obs,newobs,l,50,0.6,0.5)).

    Returns the points of the final path.

    :param ligne:   Array of the points of the line
    :type ligne: numpy.array
    :param formes:   List of shapes, each shape being a np.array() of points
    :type formes: list
    :param pointsdefo:   Deformation centers for the obstacles that are part of the room walls
    :type pointsdefo: list
    :param seuil:   Distance used to ignore some points at the start and end of the path, in case the start or end point lies inside an obstacle
    :type seuil: float
    :rtype: numpy.array

    """
    y = time.time()

    nbdefo = [0 for forme in formes]

    l = cp(ligne)
    if len(formes)>0:
        bloque,bloquants = tools.obsbloquants(tools.ignorerPoints(l,2*seuil),formes)
        while bloque:
            if not np.all(np.less(nbdefo,5)):
                break
            for i,obs in enumerate(cp(formes)):
                if not bloque:
                    break
                if i in bloquants:
                    nbdefo[i] += 1
                    #obstacles de murs
                    if len(pointsdefo[i])>0:
                        points = cp(pointsdefo[i])
                        dists = [np.amin(tools.distMin_point(l,pt)) for pt in points]
                        point = points[np.argmax(dists)]

                        newobs = np.copy(obs)
                        coef = tools.defomin(l,obs,point)

                        if coef!=0.0:
                            newobs = tools.deformerPourc(obs,point,coef)
                            newl = algo.algo(obs,newobs,l,50,0.7,0.5)
                            l = np.copy(newl)
                    else:
                        newobs,coef = tools.genCible(l,obs)
                        newl = algo.algo(obs,newobs,l,50,0.7,0.5)
                        l = np.copy(newl)

                    formes[i] = cp(obs)

                    bloque,bloquants = tools.obsbloquants(tools.ignorerPoints(l,2*seuil),formes)
    return l
Example #4
    def copy(self, new_owner=None):
        result = cp(self)

        if new_owner is not None:
            result.owner = new_owner

        return result
Example #5
 def getrecords(self, url, username=None, password=None, limit=None):
     while url:
         response = self._connect(url)
         headers = response.headers
         source = cp(url)
         self.lasturl = source
         xcount = headers['X-Total-Count'] if 'X-Total-Count' in headers else ''
         if 'Link' in headers:
             links = headers['Link'].split(',')
             for link in links:
                 if 'rel="next"' in link:
                     url = link.split(';')[0][1:-1]
                     break
                 else:
                     url = None
             try:
                 limit = ast.literal_eval(limit)
             except:
                 limit = 0
             if limit != 0:
                 nextbatch = re.search(r'sysparm_offset=([^?>\s]+)', url)
                 nextbatch = int(nextbatch.group(1)) if nextbatch else 0
                 if (xcount and int(xcount) > 10000) or nextbatch > limit:
                     url = None
         else:
             url = None
         if response.status_code == 200:
             results = response.json()
             if 'result' in results:
                 for result in results['result']:
                     if xcount:
                         result['X-Total-Count'] = xcount
                     result['source'] = source
                     yield result
Example #6
    def copy(self):
        result = super().copy()
        assert isinstance(result, type(self))

        # 1. Copy mutable attributes.
        # 1.1. Copy deathrattles.
        assert not self.dr_list or self.dr_list[0] is self.dr_trigger, \
            'The assumption that the first element of ``dr_list`` is ``dr_trigger`` is violated'
        new_dr_list = []
        for i, t in enumerate(result.data['dr_list']):
            # [NOTE]: For the first trigger (owned trigger), also change the owner.
            new_dr_list.append(t.copy(new_owner=result if i == 0 else None, new_target=result))
        result.data['dr_list'] = new_dr_list
        if new_dr_list:
            result.data['dr_trigger'] = new_dr_list[0]
        else:
            result.data['dr_trigger'] = None
        # 1.2. Copy races.
        if 'race' in self.entity_data:
            result.data['race'] = cp(result.data['race'])

        # 2. Copy enchantments. [NOTE]: Aura effects are not copied.
        result.enchantments = [e.copy(new_target=result) for e in result.enchantments]
        result.aura_enchantments = []

        return result
Example #7
def fusionAgr(obstacles):
    """
    Merges the enlarged obstacles.

    Returns the new list of obstacles.

    :param obstacles: List of obstacles to merge
    :type obstacles: list

    :rtype: list
    """
    newobstacles = cp(obstacles)
    cache = []
  
    i = 0  

    while i <len(newobstacles):
        obs1 = cp(newobstacles[i])
        for j,obs2 in enumerate(newobstacles):
            if i!=j and (bloque(obs2,[obs1]) or bloque(obs1,[obs2])) :
                pdehors1 = np.array(dehorspts(obs1,obs2))
                pdehors2 = np.array(dehorspts(obs2,obs1))
                if len(pdehors1)>0 or len(pdehors2)>0:
                    #disp.afficher([],[],obs1,obs2)
                    #disp.afficher([],[],pdehors1,pdehors2)
                    distmin,indmin1,indmin2 = inddistMin(pdehors1,pdehors2)
                    newobs = pdehors1.tolist()
                    first = cp(newobs[:indmin1+1])
                    last = cp(newobs[indmin1:])
                    o = pdehors2.tolist()
                    line = genLigne(o[indmin2],newobs[indmin1],1)
                    first.extend(o)
                    first.extend(line)
                    first.extend(last)
                    newobs = np.array(cp(first))
                    newobstacles[i] = newobs
                    #disp.afficher([],[],newobstacles[j],newobstacles[i])

                    del newobstacles[j]
                    i = 0
                    break
        i += 1
    return newobstacles
Example #8
    def __copy__(self):
        """
        Copies the board faster than deepcopy

        :rtype: Board
        """
        return Board([[cp(piece) or None
                       for piece in self.position[index]]
                      for index, row in enumerate(self.position)])
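
Defining __copy__ is what lets copy.copy (the cp alias used in several of these snippets, e.g. Example #11) reuse this faster board-copying logic: copy.copy dispatches to __copy__ when the class defines it. A toy sketch with a minimal stand-in class (not the real Board):

from copy import copy as cp

class Board:
    def __init__(self, position):
        self.position = position

    def __copy__(self):
        # copy the rows manually instead of recursing through every piece
        return Board([row[:] for row in self.position])

b = Board([[None, "P"], ["k", None]])
b2 = cp(b)                                   # dispatches to Board.__copy__
print(b2 is b, b2.position is b.position)    # False False
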
Example #9
    def setUp(self):
        from ddbmock.database.db import dynamodb
        from ddbmock.database.table import Table
        from ddbmock.database.key import PrimaryKey

        dynamodb.hard_reset()

        hash_key = PrimaryKey(TABLE_HK_NAME, TABLE_HK_TYPE)
        range_key = PrimaryKey(TABLE_RK_NAME, TABLE_RK_TYPE)

        self.t1 = Table(TABLE_NAME, TABLE_RT, TABLE_WT, hash_key, range_key)
        self.t2 = Table(TABLE_NAME2, TABLE_RT, TABLE_WT, hash_key, None)

        dynamodb.data[TABLE_NAME] = self.t1
        dynamodb.data[TABLE_NAME2] = self.t2

        self.t1.put(cp(ITEM), {})
        self.t2.put(cp(ITEM2), {})
Example #10
 def concordance(self, matches, focus, prev_onset, space):
     if focus == len(self.pattern):
         return [matches]
     _, regex = self.pattern[focus]
     _, start, end = self.intervals[focus - 1]
     area = space[prev_onset + start:prev_onset + end]
     new_matches = []
     for i, token in enumerate(area):
         if regex.match(token):
             new_match = [(i + prev_onset + start, token)]            
             if self.is_last_position(focus):
                 new_matches.append(cp(matches) + new_match)
             else:
                 forward_matches = cp(matches) + new_match
                 onset = prev_onset + i + 1
                 newer_matches = self.concordance(
                     forward_matches, focus + 1, onset, space
                 )
                 new_matches += newer_matches
     return new_matches
Example #11
    def advantage_as_result(self, move, val_scheme):
        """
        Calculates advantage after move is played

        :type move: Move
        :type val_scheme: PieceValues
        :rtype: double
        """
        test_board = cp(self)
        test_board.update(move)
        return test_board.material_advantage(move.color, val_scheme)
Example #12
def dyeing(res0, p0, rotate, index, cube, mul=1):
    """
    Args:
        res0: result
        p0: current node
        rotate: rotation direction, one of 0, 1, 2
    """
    p = p0
    e = edges[index]
    tmp = p[rotate]+e*mul
    if tmp < 0 or tmp > 2:
        return

    c = cp(cube)
    unit_mul = 1 if mul > 0 else -1
    unit = rotate_unit[rotate]
    p = tuple(x+y*unit_mul for x, y in zip(p, unit))
    if c[p] == 1:
        return False

    c[p] = 1
    new_point = [p]
    if e == 2:
        p = tuple(x+y*unit_mul for x, y in zip(p, unit))
        if c[p] == 1:
            return False
        c[p] = 1
        new_point.append(p)

    global total
    res = cp(res0)
    if index == n-1:
        total += 1
        res.append('%s -> %s%s%d -> %s' % (p0, label[rotate], '+' if mul > 0 else '-', edges[index], new_point))
        print('*'*40, total)
        print("\n".join(res))
        return

    res.append('%s -> %s%s%d -> %s' % (p0, label[rotate], '+' if mul > 0 else '-', edges[index], new_point))
    add(res, index+1, p, c)
Example #13
    def in_check_as_result(self, pos, move):
        """
        Determines whether playing the move would bring the two kings adjacent to each other.

        :type pos: Board
        :type move: Move
        :rtype: bool
        """
        test = cp(pos)
        test.update(move)
        test_king = test.get_king(move.color)

        return self.loc_adjacent_to_opponent_king(test_king.location, test)
Example #14
    def copy(self):
        """Copy the entity.

        [NOTE]: Subclasses should override this method to handle special cases such as ``dr_list``.

        :return: The copied entity.
        """
        # TODO: How to make this method more "automatic"?
        # TODO: Needs a test: do we need to call ``_reset_tags``, ``set_zp`` and other init methods or not?

        # 1. Create a shallow copy. Immutable attributes are copied automatically.
        result = cp(self)

        # 2. Copy data. Only shallow-copy the top-level (entity-level) data; the other part (cls_data) remains shared.
        result.data = result.data.parents.new_child(cp(result.data.maps[0]))

        # 3. Copy triggers and auras.
        result.triggers = {t.copy(new_owner=result) for t in result.triggers}
        result.update_triggers(Zone.Invalid, result.zone)
        result.auras = {a.copy(new_owner=result) for a in result.auras}
        result.update_auras(Zone.Invalid, result.zone)

        return result
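
The data-copying step above relies on collections.ChainMap: result.data.parents.new_child(...) rebuilds the chain with a fresh entity-level map on top of the shared class-level data. A minimal stand-alone sketch of that pattern (made-up keys, independent of the entity classes used here):

from collections import ChainMap
from copy import copy as cp

cls_data = {"cost": 3}                      # shared, class-level data
data = ChainMap({"damage": 2}, cls_data)    # entity-level map on top

# Copy only the top-level map; cls_data remains shared between both chains.
copied = data.parents.new_child(cp(data.maps[0]))
copied["damage"] = 5
print(data["damage"], copied["damage"])     # 2 5
print(copied["cost"])                       # 3 (read through the shared cls_data)
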
Example #15
def fill_dict_with_defaults(dic):
    """
    Helper function that fills in missing values for a given dictionary containing information about the distribution to test.

    Argument:
    :param dic: dictionary containing information about the distribution to test.
    :type dic:  dictionary

    Output:
    :returns: dictionary with missing values filled in.
    :rtype:   dictionary
    """
    RD = cp(dic)
    try:
        if isinstance(RD['dist'](),RD['dist']):
            RD['dist']=RD['dist']()
    except TypeError:
        pass
    except:
        raise AssertionError('Distribution %s does not seem to support a default distribution (called with no arguments) ' %(RD['dist']))
    if 'n' not in RD['dist'].param:
        n=1
    else:
        n=RD['dist'].param['n']
    if not 'nsamples' in dic.keys():
        RD['nsamples']=500000
    if not 'tolerance' in dic.keys():
        RD['tolerance']=1e-01
    if not 'support' in dic.keys():
        RD['support'] = (-np.inf,np.inf)
    if not 'proposal_low' in dic.keys():
        if RD['support'][1]-RD['support'][0]<np.inf:
            RD['proposal_low']=Uniform({'n':n,
                                    'low':RD['support'][0],
                                    'high':RD['support'][1]})
        elif RD['support'][0]==0 and RD['support'][1]==np.inf:
            RD['proposal_low']=Gamma({'u':1.,'s':2.})
        else:
            RD['proposal_low']=Gaussian({'n':n,'sigma':np.eye(n)*0.8})
    if not 'proposal_high'  in dic.keys():
        if RD['support'][1]-RD['support'][0]<np.inf:
            RD['proposal_high']=Uniform({'n':n,
                                    'low':RD['support'][0],
                                    'high':RD['support'][1]})
        elif RD['support'][0]==0 and RD['support'][1]==np.inf:
            RD['proposal_high']=Gamma({'u':3.,'s':2.})
        else:
            RD['proposal_high']= Gaussian({'n':n,'sigma':np.eye(n)*10})

    return RD
Example #16
def fusion(obstacles,seuil):
    """
    Merges obstacles that are too close to each other, according to the threshold.

    Returns the new list of obstacles.

    :param obstacles: List of obstacles to merge
    :type obstacles: list
    :param seuil: Distance to respect
    :type seuil: float

    :rtype: list
    """
    newobstacles = cp(obstacles)
    i = 0

    while i <len(newobstacles):
        obs1 = newobstacles[i] 
        for j,obs2 in enumerate(newobstacles):
            if i!=j :
                distmin,indminobs1,indminobs2 = inddistMin(obs1,obs2)
                if distmin<2*seuil:
                    newobs = obs1.tolist()
                    first = cp(newobs[:indminobs1+1])
                    last = cp(newobs[indminobs1:])
                    o = obs2.tolist()
                    line = genLigne(o[indminobs2],newobs[indminobs1],5)
                    first.extend(o)
                    first.extend(line)
                    first.extend(last)
                    newobs = np.array(cp(first))
                    newobstacles[i] = newobs
                    del newobstacles[j]
                    i = -1
                    break
        i += 1
    return newobstacles
Example #17
    def _add_source(self, source):
        '''
        Adds a source if no valid source is present.

        @param source A valid ordered list of some type.
        @type source (str, unicode, list, dict, tuple)
        '''
        # Input Validation
        if not isinstance(source, (str, unicode, list, tuple, dict)):
            raise TypeError('Source must be an ordered list or string, given '+\
                            type(source).__name__)
        
        self._source = cp(source)
        self._source_by_state = [[] for x in range(0, len(source))]
        self._valid_source = True
Example #18
    def sample_table(self):
        """
        Return (possibly first parsing/building) the table of samples.

        :return pandas.core.frame.DataFrame | NoneType: table of samples'
            metadata, if one is defined
        """
        from copy import copy as cp
        key = NAME_TABLE_ATTR
        attr = "_" + key
        if self.get(attr) is None:
            sheetfile = self[METADATA_KEY].get(key)
            if sheetfile is None:
                return None
            self[attr] = self.parse_sample_sheet(sheetfile)
        return cp(self[attr])
Example #19
    def test_update_return_updated_old(self):
        from ddbmock import connect_boto_patch
        from boto.dynamodb.exceptions import DynamoDBValidationError

        key = {u"HashKeyElement": {TABLE_HK_TYPE: HK_VALUE}}
        ADD_VALUE = 1

        db = connect_boto_patch()
        expected = {FIELD_NUM_NAME: cp(ITEM2[FIELD_NUM_NAME])}

        # regular increment
        ret = db.layer1.update_item(TABLE_NAME2, key, {
                FIELD_NUM_NAME: {'Action': 'ADD', u'Value': {u'N': unicode(ADD_VALUE)}},
            },
            return_values=u'UPDATED_OLD',
         )
        self.assertEqual(expected, ret[u'Attributes'])
Example #20
    def subsample_table(self):
        """
        Return (possibly first parsing/building) the table of subsamples.

        :return pandas.core.frame.DataFrame | NoneType: table of subsamples'
            metadata, if the project defines such a table
        """
        from copy import copy as cp
        key = SAMPLE_SUBANNOTATIONS_KEY
        attr = "_" + key
        if self.get(attr) is None:
            sheetfile = self[METADATA_KEY].get(key)
            if sheetfile is None:
                return None
            self[attr] = pd.read_csv(sheetfile,
                sep=infer_delimiter(sheetfile), **READ_CSV_KWARGS)
        return cp(self[attr])
Example #21
    def no_moves(self, input_color):

        # Loop through every square on the board
        for piece in self:

            # Tests if square on the board is not empty
            if piece is not None and piece.color == input_color:

                for move in piece.possible_moves(self):

                    test = cp(self)
                    test.update(move)

                    if not test.get_king(input_color).in_check(test):
                        return False

        return True
Example #22
 def __call__(self, tokens):
     focus = 0
     target = self.pattern[focus]
     _, regex = target
     matches = []
     for i, token in enumerate(tokens):
         if regex.match(token):
             onset = i + 1
             match = [(i, token)]
             _matches = self.concordance(
                 cp(match), focus + 1, onset, tokens
             )
             for _match in _matches:
                 start = _match[0][0]
                 end = _match[-1][0]
                 window = self.frame(tokens, start, end)
                 matches.append(' '.join(window))
     return matches
Example #23
def initialize(qbitAdj):
    '''Initialise routing solver. Only call once per embedding trial'''
    global _paths, _allPaths, _curr_used, _is_shared
    global _is_used, _active, _qbitAdj, _hist_cost, _sharing_cost

    _qbitAdj = cp(qbitAdj)

    _paths, _allPaths = [], {}
    _curr_used, _is_shared, _is_used, _active, _hist_cost = {}, {}, {}, {}, {}

    _sharing_cost = 1.0

    for key in qbitAdj:
        _curr_used[key] = False
        _is_shared[key] = False
        _is_used[key] = 0
        _active[key] = True
        _hist_cost[key] = 0
Example #24
    def test_update_return_all_old(self):
        from ddbmock import connect_boto_patch
        from boto.dynamodb.exceptions import DynamoDBValidationError

        key = {u"HashKeyElement": {TABLE_HK_TYPE: HK_VALUE}}
        ADD_VALUE = 1

        db = connect_boto_patch()
        expected = cp(ITEM2)

        # regular increment
        ret = db.layer1.update_item(
            TABLE_NAME2,
            key,
            {FIELD_NUM_NAME: {"Action": "ADD", u"Value": {u"N": unicode(ADD_VALUE)}}},
            return_values=u"ALL_OLD",
        )
        self.assertEqual(expected, ret[u"Attributes"])
Example #25
    def iniflag(self, img):
        """ Calculate clipped rms of every channel, and then median and clipped rms of this rms distribution.
        Exclude channels where rms=0 (all pixels 0 or blanked) and of the remaining, if outliers beyond 5 sigma
        are less then 10 % of number of channels, flag them. This is done only when flagchan_rms = True.
        If False, only rms=0 (meaning, entire channel image is zero or blanked) is flagged."""

        image = img.image_arr
        nchan = image.shape[1]
        iniflags = N.zeros(nchan, bool)
        zeroflags = N.zeros(nchan, bool)
        crms = img.channel_clippedrms

        for ichan in range(nchan):
            if crms[ichan] == 0: zeroflags[ichan] = True
        iniflags = cp(zeroflags)

        if img.opts.flagchan_rms:
            iniflags = self.flagchans_rmschan(crms, zeroflags, iniflags, 4.0)

        return iniflags
Example #26
    def add_effect(self, effect, name, frame):
        """
        Add a PropagationEffect to the model.

        Parameters
        ----------
        effect : `~sncosmo.PropagationEffect`
            Propagation effect.
        name : str
            Name of the effect.
        frame : {'rest', 'obs'}
        """
        if not isinstance(effect, PropagationEffect):
            raise TypeError('effect is not a PropagationEffect')
        if frame not in ['rest', 'obs']:
            raise ValueError("frame must be one of: {'rest', 'obs'}")
        self._effects.append(cp(effect))
        self._effect_names.append(name)
        self._effect_frames.append(frame)
        self._synchronize_parameters()
Example #27
def check_dldx(dic):
    """
    Checks the gradient with respect to the data of the
    distribution under test, if such a function is provided by that
    distribution. To this end a few data points are sampled and the
    gradient is compared with the finite-difference approximation to
    the gradient. An absolute error of the specified tolerance (within
    the dictionary) is allowed to pass the test.

    Argument:
    :param dic: dictionary containing the distribution to test; a tolerance must also be specified
    :type dic: dictionary

    """

    dic = fill_dict_with_defaults(dic)
    d = dic['dist']
    havedldx=True
    try:
        data = d.sample(3)
        d.dldx(data)
    except AbstractError:
        havedldx=False
    if havedldx:
        data_copy = cp(data)
        def f(X):
            data_copy.X = X.reshape(data_copy.X.shape)
            return d.loglik(data_copy)
        # def df(X):
        #     data_copy.X = X.reshape(data_copy.X.shape)
        #     return d.dldx(data_copy)
        X0 = data.X
        df_num = approx_data_fprime(X0,f,epsilon=1e-08)
        df_ana = d.dldx(data)

        err = df_num - df_ana

        # err = check_grad(f,df,X0)
        assert (err<dic['tolerance']).all()
    else:
        assert True
Example #28
def propagate(u0_orig):
    
    # Diffusion
    u0 = cp(u0_orig)
    un = np.zeros(np.shape(u0))
    un[1:-1, 1:-1] = u0[1:-1, 1:-1] + (     (u0[2:, 1:-1] - 2*u0[1:-1, 1:-1] + u0[:-2, 1:-1])*Dxeff +     (u0[1:-1, 2:] - 2*u0[1:-1, 1:-1] + u0[1:-1, :-2])*Dyeff )

    # Dirichlet outer boundary
    un[[0,-1],:]=udirichlet
    un[:,[0,-1]]=udirichlet
    
    # Neumann inner boundary
    un[ixbot-1,iybox] = u0[ixbot-1,iybox] +(u0[ixbot-2,iybox] - u0[ixbot-1,iybox])*Dxeff -gneumanneff
    un[ixtop+1,iybox] = u0[ixtop+1,iybox] +(u0[ixtop+2,iybox] - u0[ixtop+1,iybox])*Dxeff -gneumanneff
    un[ixbox,iylft-1] = u0[ixbox,iylft-1] +(u0[ixbox,iylft-2] - u0[ixbox,iylft-1])*Dxeff -gneumanneff
    un[ixbox,iyrgt+1] = u0[ixbox,iyrgt+1] +(u0[ixbox,iyrgt+2] - u0[ixbox,iyrgt+1])*Dxeff -gneumanneff
    
    # Also zero-out inside the box (this is just aesthetic)
    un = fillin(un,ixbox, iybox)
    
    return un
Example #29
def expandPath(unavailable = []):
    '''Expand lowest cost path'''
    global _paths, _is_used, _curr_used, _qbitAdj

    # select expanding path, remove path from list of paths
    path = _paths.pop(0)
    # possible extensions
    extensions = [qb for qb in _qbitAdj[path[-1]] if not _curr_used[qb]]

    new_paths = []
    for qbit in extensions:
        _curr_used[qbit] = True
        # new path
        temp_new = cp(path)
        temp_new.append(qbit)
        # new cost
        _is_used[qbit] += 1            # cost including new qbit
        temp_new[0] += nodeCost(qbit)
        _is_used[qbit] -= 1            # qbit only used if goal reached
        # add extended paths to list of paths
        new_paths.append(temp_new)

    _paths += new_paths
    return new_paths
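
The temp_new = cp(path) line is what keeps each extension independent of the path it branches from: the copy is made before appending the new qbit and before adjusting the cost stored in element 0. A small stand-alone sketch of the same pattern (assuming cp is copy.deepcopy, as in Example #31):

from copy import deepcopy as cp

path = [0.0, (0, 0, 0, 0)]       # [cost, qbit, qbit, ...]
branch = cp(path)                # copy before extending so the original path is untouched
branch.append((0, 0, 0, 1))
branch[0] += 1.5                 # cost contribution of the new qbit
print(path)                      # [0.0, (0, 0, 0, 0)]
print(branch)                    # [1.5, (0, 0, 0, 0), (0, 0, 0, 1)]
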
Example #30
    def _calc_all_possible_moves(self, input_color):
        """
        Yields all possible moves for the given color.

        :type input_color: Color
        :rtype: generator
        """
        for piece in self:

            # Tests if square on the board is not empty
            if piece is not None and piece.color == input_color:

                for move in piece.possible_moves(self):

                    test = cp(self)
                    test_move = Move(end_loc=move.end_loc,
                                     piece=test.piece_at_square(move.start_loc),
                                     status=move.status,
                                     start_loc=move.start_loc,
                                     promoted_to_piece=move.promoted_to_piece)
                    test.update(test_move)

                    if self.king_loc_dict is None:
                        yield move
                        continue

                    my_king = test.piece_at_square(self.king_loc_dict[input_color])

                    if my_king is None or \
                            not isinstance(my_king, King) or \
                            my_king.color != input_color:
                        self.king_loc_dict[input_color] = test.find_king(input_color)
                        my_king = test.piece_at_square(self.king_loc_dict[input_color])

                    if not my_king.in_check(test):
                        yield move
Example #31
#!/usr/bin/python

from algorithm import isPerfect
from algorithm import isPrime
from algorithm import divisorSum
from algorithm import printNum
from algorithm import cocktailShaker
from algorithm import exchangeSortReversed

from copy import deepcopy as cp

l = [9, 3, 7, 45, 5, 11, 8, 423, 25, 63]

print("IsPerfect(6): " + str(isPerfect(6)))
print("IsPerfect(28): " + str(isPerfect(28)))
print("IsPerfect(12): " + str(isPerfect(12)))
print("IsPerfect(10): " + str(isPerfect(10)))
print
print("IsPrime(2): " + str(isPrime(2)))
print("IsPrime(17): " + str(isPrime(17)))
print("IsPrime(36): " + str(isPrime(36)))
print
print("printNum(17):")
printNum(17)
print
print("cocktailShaker(l):")
cocktailShaker(cp(l), True)
print
print("exchangeSortReversed(l):")
exchangeSortReversed(cp(l), True)
    print("pt. lengths: ", len(x1pts), len(x2pts), len(y1pts), len(y2pts))

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_facecolor('black')
ax.set_aspect(aspect='equal')
fig.patch.set_facecolor('black')

# Initialize the lines to be plotted
pen_lines = []
trail1_lines = []
trail2_lines = []
for k in range(0, total):
    trail1_line, = ax.plot([], [], lw=1)
    trail2_line, = ax.plot([], [], lw=1)
    trail1_lines.append(cp(trail1_line))
    trail2_lines.append(cp(trail2_line))
    pen_line, = ax.plot([], [], color='white', lw=3)
    pen_lines.append(cp(pen_line))


def init():
    """ Set the axes limits. """
    l = 0
    if (params[2] > params[3]):
        l = params[2]
    else:
        l = params[3]

    ax.set_ylim(-2 * l * 1.1, 2 * l * 1.1)
    ax.set_xlim(-2 * l * 1.1, 2 * l * 1.1)
Example #33
def placeCell(cell):
    '''Attempt to find a suitable qbit to place input cell on.

    inputs: cell(int)		: source index of cell to place

    output: qbit(tuple)		: 4-tup for qbit to assign to cell
            paths(list)		: list of paths from placed cell qubits to
                              qbit
    '''

    global _qbitAdj, _source, _qubits
    global _cell_flags, _qbit_flags, _reserved, _vacancy

    log('\n' + '#' * 30 + '\n')
    log('Placing cell: %s\n' % str(cell))

    ### Initialise

    seam_flag = False
    qbit = None

    # find qubits for placed adjacent cells
    adj_qbits = [_qubits[c] for c in _source[cell] if _cell_flags[c]['placed']]
    log('Adjacent qbits: %s\n' % str(adj_qbits))

    # find required availability of target qbit
    avb = len(_source[cell])
    log('Required availability: %d\n' % avb)

    # multisourcesearch parameters
    forb = set()  # list of forbidden qbit for multisourcesearch
    search_count = 0  # counter for number of failed searches

    # every time a seam is opened, we should check to see if there is a
    # better qubit to consider
    while qbit is None:

        ### Open Seam

        if seam_flag:
            log('Running Seam Opening Routine\n')
            seam_flag = False

            # check for vacancies
            if not any(_vacancy):
                log('No vacant columns/rows to open\n\n')
                raise KeyError('Out of room')

            # find available seams
            seams = availableSeams(adj_qbits)

            # analyse seams
            seam_dicts = map(lambda s: genSeamDict(s, adj_qbits), seams)
            seam_dicts = filter(None, seam_dicts)

            if len(seam_dicts) == 0:
                log('No suitable seams detected\n')
                return None, []

            # select seam to open
            seam_dict = selectSeam(seam_dicts)
            log('current vacancy: %s\n' % str(_vacancy))
            log('selected seam %s :: %s\n' %
                (str(seam_dict['sm']), str(seam_dict['dr'])))

            # open seam
            success = openSeam(**seam_dict)

            if not success:
                log('Failed to open seam\n\n')
                return None, []

            # update adjacent qubits
            log('Seam successfully opened... \n')
            adj_qbits = [
                _qubits[c] for c in _source[cell] if _cell_flags[c]['placed']
            ]
            log('New adjacent qbits: %s\n' % str(adj_qbits))

        ### Pick qubit to assign

        # run multisource search method, get list of candidate qbits
        qbits = multiSourceSearch(adj_qbits, avb, forb=forb)

        # check if found
        if not qbits:
            log('multiSourceSearch failed\n')
            seam_flag = True
            continue

        log('Found %d candidate qubits: %s \n' %
            (len(qbits), map(lambda x: str(x[1]), qbits)))

        # check each candidate qbit from multisourcesearch in order
        for qbit in qbits:

            suit, qbit = qbit

            log('Trying qbit: %s with suitability %s ...' %
                (str(qbit), str(suit)))

            ### Find paths

            routes = [[qb, qbit] for qb in adj_qbits]
            end_points = list(set([it for rt in routes for it in rt]))
            # find best consistent paths
            cost = Routing.Routing(routes, _reserved, writePath=ROUTE_PATH)

            # check successful routing

            if cost >= Routing.COST_BREAK:
                log('routing failed...\n')
                # disable end points
                Routing.disableQubits(end_points)
                continue

            log('\t success\n')
            break
        else:
            log('No suitable qbit found\n')
            qbit = None
            search_count += 1
            if search_count >= MAX_SEARCH_COUNT:
                seam_flag = True
                search_count = 0
                forb.clear()
            else:
                forb.update(map(lambda x: x[1], qbits))

    # get paths
    paths = cp(Routing.getPaths().values())

    # disable path qubits
    qbs = list(set([it for path in paths for it in path]))
    Routing.disableQubits(qbs)

    log('Placed on qubit: %s\n\n' % str(qbit))
    #    log('Paths: \n')
    #    for path in paths:
    #        log('\t %s \n' % str(path))
    #    log('\n')
    return qbit, paths
Example #34
    def calculate(self,
                  atoms=None,
                  properties=None,
                  system_changes=None,
                  forces=None,
                  virial=None,
                  local_energy=None,
                  local_virial=None,
                  vol_per_atom=None,
                  copy_all_properties=True,
                  calc_args=None,
                  **kwargs):

        # handling the property inputs
        if properties is None:
            properties = self.get_default_properties()
        else:
            properties = list(set(self.get_default_properties() + properties))

        if len(properties) == 0:
            raise RuntimeError('Nothing to calculate')

        for prop in properties:
            if prop not in self.implemented_properties:
                raise RuntimeError(
                    "Don't know how to calculate property '%s'" % prop)

        # initialise dictionary of arguments to be passed to the calculator
        _dict_args = {}
        val = _check_arg(forces)
        if val == 'y':
            properties += ['force']
        elif val == 'add':
            properties += ['force']
            _dict_args['force'] = forces

        val = _check_arg(virial)
        if val == 'y':
            properties += ['virial']
        elif val == 'add':
            properties += ['virial']
            _dict_args['virial'] = virial

        val = _check_arg(local_energy)
        if val == 'y':
            properties += ['local_energy']
        elif val == 'add':
            properties += ['local_energy']
            _dict_args['local_energy'] = local_energy

        val = _check_arg(local_virial)
        if val == 'y':
            properties += ['local_virial']
        elif val == 'add':
            properties += ['local_virial']
            _dict_args['local_virial'] = local_virial

        # needed dry run of the ase calculator
        ase.calculators.calculator.Calculator.calculate(
            self, atoms, properties, system_changes)

        if not self.calculation_always_required and not self.calculation_required(
                self.atoms, properties):
            return

        # construct the quip atoms object which we will use to calculate on
        self._quip_atoms = quippy.convert.ase_to_quip(self.atoms)

        # construct args_str, automatically aliasing the calculable non-quippy properties
        # calc_args string to be passed to the Fortran code
        args_str = self.calc_args
        if calc_args is not None:
            if isinstance(calc_args, dict):
                calc_args = key_val_dict_to_str(calc_args)
            args_str += ' ' + calc_args
        if kwargs is not None:
            args_str += ' ' + key_val_dict_to_str(kwargs)

        args_str += ' energy'
        # no need to add logic to energy, it is calculated anyways (returned when potential called)
        if 'virial' in properties or 'stress' in properties:
            args_str += ' virial'
        if 'local_virial' in properties or 'stresses' in properties:
            args_str += ' local_virial'
        if 'energies' in properties or 'local_energy' in properties:
            args_str += ' local_energy'
        if 'forces' in properties:
            args_str += ' force'
        # TODO: implement 'elastic_constants', 'unrelaxed_elastic_constants', 'numeric_forces'

        # fixme: workaround to get the calculated energy, because the wrapped dictionary is not handling that float well
        ener_dummy = np.zeros(1, dtype=float)

        # the calculation itself
        # print('Calling QUIP Potential.calc() with args_str "{}"'.format(args_str))
        self._quip_potential.calc(self._quip_atoms,
                                  args_str=args_str,
                                  energy=ener_dummy,
                                  **_dict_args)

        # retrieve data from _quip_atoms.properties and _quip_atoms.params
        _quip_properties = quippy.convert.get_dict_arrays(
            self._quip_atoms.properties)
        _quip_params = quippy.convert.get_dict_arrays(self._quip_atoms.params)

        self.results['energy'] = ener_dummy[0]
        self.results['free_energy'] = self.results['energy']

        # process potential output to ase.properties
        # not handling energy here, because that is always returned by the potential above
        if 'virial' in _quip_params.keys():
            stress = -_quip_params['virial'].copy() / self.atoms.get_volume()
            # convert to 6-element array in Voigt order
            self.results['stress'] = np.array([
                stress[0, 0], stress[1, 1], stress[2, 2], stress[1, 2],
                stress[0, 2], stress[0, 1]
            ])
            self.results['virial'] = _quip_params['virial'].copy()

        if 'force' in _quip_properties.keys():
            self.results['forces'] = np.copy(_quip_properties['force'].T)

        if 'local_energy' in _quip_properties.keys():
            self.results['energies'] = np.copy(
                _quip_properties['local_energy'].T)

        if 'local_virial' in _quip_properties.keys():
            self.results['local_virial'] = np.copy(
                _quip_properties['local_virial'])

        if 'stresses' in properties:
            # use the correct atomic volume
            if vol_per_atom is not None:
                if vol_per_atom in self.atoms.arrays.keys():
                    # case of reference to a column in atoms.arrays
                    _v_atom = self.atoms.arrays[vol_per_atom]
                else:
                    # try for case of a given volume
                    try:
                        _v_atom = float(vol_per_atom)
                    except ValueError:
                        # cannot convert to float, so wrong
                        raise ValueError(
                            'volume_per_atom: not found in atoms.arrays.keys() and cannot utilise value '
                            'as given atomic volume')
            else:
                # just use average
                _v_atom = self.atoms.get_volume() / self._quip_atoms.n
            self.results['stresses'] = -np.copy(
                _quip_properties['local_virial']).T.reshape(
                    (self._quip_atoms.n, 3, 3), order='F') / _v_atom
        if isinstance(copy_all_properties, bool) and copy_all_properties:
            if atoms is not None:
                _at_list = [self.atoms, atoms]
            else:
                _at_list = [self.atoms]

            for at in _at_list:
                _skip_keys = set(
                    list(self.results.keys()) + [
                        'Z', 'pos', 'species', 'map_shift', 'n_neighb',
                        'force', 'local_energy', 'local_virial'
                    ])

                # default params arguments
                at.info['energy'] = self.results['energy']

                if 'stress' in self.results.keys():
                    at.info['stress'] = self.results['stress'].copy()

                # default array arguments
                for key in ('forces', 'energies', 'stresses'):
                    if key in self.results.keys():
                        at.arrays[key] = self.results[key].copy()

                # any other params
                for param, val in _quip_params.items():
                    if param not in _skip_keys:
                        at.info[param] = cp(val)

                # any other arrays
                for prop, val in _quip_properties.items():
                    if prop not in _skip_keys:
                        at.arrays[prop] = np.copy(val, order='C')
Example #35
R = np.linspace(R1, R2, N)
dr = R[1] - R[0]

# thermal parameters
T1 = 373
T2 = 293
k = 205
rho = 2700
C = 900  #J/kgK

dt = 1e-4

# computation variables section
T = np.zeros(N, dtype=float) + T2
T[0] = cp(T1)
Ts = cp(T)

# alpha = k/rho/C

# solution section
for iterate in range(100000):
    # solution
    for i in range(1, N - 1):
        C1 = k * (T[i + 1] - 2 * T[i] + T[i - 1]) / dr**2
        C2 = k / R[i] * (T[i + 1] - T[i - 1]) / dr / 2

        F = (C1 + C2) / rho / C

        Ts[i] = T[i] + F * dt
Example #36
 def expand(self, currentState):
     self.nextStates.clear()
     self.nextAction.clear()
     pos = currentState.index(0)
     self.nextState0 = cp(currentState)
     self.nextState1 = cp(currentState)
     self.nextState2 = cp(currentState)
     self.nextState3 = cp(currentState)
     if (pos == 0):
         self.nextState0[0] = currentState[1]
         self.nextState0[1] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("left")
         self.nextState1[0] = currentState[3]
         self.nextState1[3] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("up")
     if (pos == 1):
         self.nextState0[1] = currentState[0]
         self.nextState0[0] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("right")
         self.nextState1[1] = currentState[2]
         self.nextState1[2] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("left")
         self.nextState2[1] = currentState[4]
         self.nextState2[4] = 0
         self.nextStates.append(self.nextState2)
         self.nextAction.append("up")
     if (pos == 2):
         self.nextState0[2] = currentState[1]
         self.nextState0[1] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("right")
         self.nextState1[2] = currentState[5]
         self.nextState1[5] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("up")
     if (pos == 3):
         self.nextState0[3] = currentState[0]
         self.nextState0[0] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("down")
         self.nextState1[3] = currentState[4]
         self.nextState1[4] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("left")
         self.nextState2[3] = currentState[6]
         self.nextState2[6] = 0
         self.nextStates.append(self.nextState2)
         self.nextAction.append("up")
     if (pos == 4):
         self.nextState0[4] = currentState[1]
         self.nextState0[1] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("down")
         self.nextState1[4] = currentState[3]
         self.nextState1[3] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("right")
         self.nextState2[4] = currentState[5]
         self.nextState2[5] = 0
         self.nextStates.append(self.nextState2)
         self.nextAction.append("left")
         self.nextState3[4] = currentState[7]
         self.nextState3[7] = 0
         self.nextStates.append(self.nextState3)
         self.nextAction.append("up")
     if (pos == 5):
         self.nextState0[5] = currentState[2]
         self.nextState0[2] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("down")
         self.nextState1[5] = currentState[4]
         self.nextState1[4] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("right")
         self.nextState2[5] = currentState[8]
         self.nextState2[8] = 0
         self.nextStates.append(self.nextState2)
         self.nextAction.append("up")
     if (pos == 6):
         self.nextState0[6] = currentState[3]
         self.nextState0[3] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("down")
         self.nextState1[6] = currentState[7]
         self.nextState1[7] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("left")
     if (pos == 7):
         self.nextState0[7] = currentState[6]
         self.nextState0[6] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("right")
         self.nextState1[7] = currentState[4]
         self.nextState1[4] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("down")
         self.nextState2[7] = currentState[8]
         self.nextState2[8] = 0
         self.nextStates.append(self.nextState2)
         self.nextAction.append("left")
     if (pos == 8):
         self.nextState0[8] = currentState[5]
         self.nextState0[5] = 0
         self.nextStates.append(self.nextState0)
         self.nextAction.append("down")
         self.nextState1[8] = currentState[7]
         self.nextState1[7] = 0
         self.nextStates.append(self.nextState1)
         self.nextAction.append("right")
Example #37
udirichlet = alphasigma


# Aesthetics ... fills in the box with an arbitrary constant value
def fillin(un, ixbox, iybox):
    border = cp(un[ixbox[0] - 1, iybox[0]])
    for ix in ixbox:
        for iy in iybox:
            un[ix, iy] = border
    return un


# Initialize u0 and un as ones/zeros matrices
u0 = np.ones([nx, ny]) * udirichlet  # old u values
u0 = fillin(u0, ixbox, iybox)
u1 = cp(u0)

# In[13]:

# Physical parameters translated into values for computation
dx2 = dx**2
dy2 = dy**2
dt = (dx2 + dy2) / D / 10
print dt
Dxeff = D * dt / dx2
Dyeff = D * dt / dy2
gneumanneff = gneumann * dt
print 'gneumann effective', gneumanneff


# The differential equation solver
Example #38
 def items(self):
     docids = cp(self.elements)
     docids.add(self.id)
     return sorted(list(docids))
Example #39
rho0  = 1.5                 # inlet density of fluid
T0    = 500.0               # inlet temperature
Mach  = 3.0                 # inlet mach number
gamma = 1.4                 # ratio of specific heats
R     = 287.0               # gas constant
L     = 1.0                 # length of domain

# reading data from files-------------------------------------------------------
fid_21  = pd.read_csv('Data21.csv')
fid_41  = pd.read_csv('Data41.csv')
fid_81  = pd.read_csv('Data81.csv')

# computing errors from results-------------------------------------------------
Umax = Mach*np.sqrt(gamma*R*T0)   # analytical velocity throughout domain

Erms = np.zeros(3); Ep = cp(Erms) # initializing arrays

# computing rms errors
Erms[0] = np.sqrt(((Umax-fid_21["U"])**2).mean())
Erms[1] = np.sqrt(((Umax-fid_41["U"])**2).mean())
Erms[2] = np.sqrt(((Umax-fid_81["U"])**2).mean())

# computing percentage errors
Ep[0] = (abs(Umax-fid_21["U"])/Umax).mean()*100.0
Ep[1] = (abs(Umax-fid_41["U"])/Umax).mean()*100.0
Ep[2] = (abs(Umax-fid_81["U"])/Umax).mean()*100.0

# computing grid spaces
DX = [L/40,L/80,L/160]

# dumping error computations to file
Example #40
def solve(elf_dmg=3, ignore_losses=True):

    # Get a new fresh grid
    global grid
    grid = cp(backup)

    # Units saved as (y, x, hp, type)
    elves = []
    goblins = []
    for y, row in enumerate(grid):
        for x, obj in enumerate(grid[y]):
            if obj == "E":
                elves += [(y, x, 200, "e")]
            elif obj == "G":
                goblins += [(y, x, 200, "g")]

    # Main loop
    turn = 0
    while elves and goblins:

        turn += 1

        ei = 0
        gi = 0
        elves.sort()
        goblins.sort()

        while ei != len(elves) or gi != len(goblins):

            # If no units are left we break and
            # remove the turn since it is not complete
            if len(elves) == 0 or len(goblins) == 0:
                turn -= 1
                break

            # Assign the unit to move
            if ei == len(elves):
                curr = goblins[gi]
            elif gi == len(goblins):
                curr = elves[ei]
            else:
                if goblins[gi] < elves[ei]:
                    curr = goblins[gi]
                else:
                    curr = elves[ei]

            # Set current enemy
            en = "G" if curr[3] == "e" else "E"
            enemies = goblins if en == "G" else elves

            search = en not in neigh_units(curr[0], curr[1])

            # Search if no enemies are near
            if search:

                # Set potential tiles
                for e in enemies:
                    for y, x in neigh(e[0], e[1]):
                        if grid[y][x] == ".":
                            grid[y][x] = "@"

                # Find the next position for the unit
                new_pos = step(curr[0], curr[1])

                # Undo potential tiles
                for e in enemies:
                    for y, x in neigh(e[0], e[1]):
                        if grid[y][x] == "@":
                            grid[y][x] = "."

                # Update unit and grid with new positions
                if new_pos is not None:
                    grid[new_pos[0]][new_pos[1]] = grid[curr[0]][curr[1]]
                    grid[curr[0]][curr[1]] = "."
                    curr = (new_pos[0], new_pos[1], curr[2], curr[3])

            # Attack

            # Find targets
            targets = []
            for pos, unit in zip(neigh(curr[0], curr[1]),
                                 neigh_units(curr[0], curr[1])):
                if unit == en:
                    for i, e in enumerate(enemies):
                        if e[0] == pos[0] and e[1] == pos[1]:
                            targets.append([i, e])

            # Attack if we have a target
            if targets:

                # Get target to attack
                target = targets[0]
                for t in targets[1:]:
                    if t[1][2] < target[1][2]:
                        target = t

                # Apply damage
                dmg = elf_dmg if curr[3] == "e" else 3
                target[1] = (target[1][0], target[1][1], target[1][2] - dmg,
                             target[1][3])
                if target[1][2] <= 0:
                    grid[target[1][0]][target[1][1]] = "."
                    if en == "G":
                        if target[0] < gi: gi -= 1
                        del goblins[target[0]]
                    else:
                        if target[0] < ei: ei -= 1
                        del elves[target[0]]
                        if not ignore_losses:
                            return 0
                else:
                    if en == "G":
                        goblins[target[0]] = target[1]
                    else:
                        elves[target[0]] = target[1]

            # Update indexes and unit in list
            if curr[3] == "e":
                elves[ei] = curr
                ei += 1
            else:
                goblins[gi] = curr
                gi += 1

    score = sum([hp for y, x, hp, t in elves + goblins]) * turn

    return score
Example #41
def reserveQubits(qbits):
    '''for each qbit in qbits, check if adjacent qubits should be
    reserved. Reserve if appropriate.
    '''
    global _cells, _numAdj, _qbit_flags, _reserved

    if not qbits:
        return

    #log('\n\nReserving locals qbits for: %s\n' % map(str, qbits))

    for qbit in qbits:
        #log('\nchecking qbit %s\n' % str(qbit))
        # cell properties
        try:
            cell = _cells[qbit]
            if cell is None:
                raise KeyError
        except KeyError:
            raise KeyError('Qbit %s is not assigned to a cell...' % str(qbit))
        num_adj = _numAdj[cell]

        #log('Required adjacency: %d\n' % num_adj)

        # wipe old reservations
        old_res = set()
        if _reserved[qbit]:
            #log('releasing qbits: %s: \n' % map(str, _reserved[qbit]))
            for qb in _reserved[qbit]:
                _qbit_flags[qb]['reserved'] = False
            setTileOcc(_reserved[qbit], dec=True)
            old_res = cp(_reserved[qbit])
            _reserved[qbit].clear()

        # get list of all adjacent unreserved qubits and count qbit type
        qbs = []
        _qbit_flags[qbit]['c_in'] = set()
        _qbit_flags[qbit]['c_out'] = 0
        for q in _qbitAdj[qbit]:
            if not (_qbit_flags[q]['taken'] or _qbit_flags[q]['reserved']):
                qbs.append(q)
                if q[0:2] == qbit[0:2]:
                    _qbit_flags[qbit]['c_in'].add(q)
                else:
                    _qbit_flags[qbit]['c_out'] += 1

        # if exact amount of adjacent qubits available, reserve
        if num_adj == len(qbs):
            # log('Reserving all free qbits for qbit %s\n' % str(qbit))
            # reserve all adjacent qubits
            res_check = set()
            for qb in qbs:
                _qbit_flags[qb]['reserved'] = True
                _reserved[qbit].add(qb)
                res_check.update(_qbit_flags[qb]['prox'])
            setTileOcc(qbs)
            # if reserved qubits changed, check local qubits for reservations
            if old_res == _reserved[qbit]:
                reserveQubits(res_check - set(qbits))

        # check for insufficient qubits
        elif num_adj > len(qbs):
            raise KeyError('Insufficient free qubits for cell %s' % str(cell))

    setVacancy()
Example #42
	def soft_weat(self, sets, target_at_dict, bias_combinations, l=1, nullspace_iterations = -1, neighb_count=20):
		'''
		SoftWEAT Debiasing

		Parameters
		----------
		sets: Existing attribute and target set of words
		target_at_dict: dict | Keys being target/subclass sets and values corresponding attribute sets for debiasing
		bias_combinations: dict | Classes and respective subclasses to be included in debiasing, by default in following form : {"gender" : ["male_terms", "female_terms"],   "race": ["black_names", "white_names"], "religion" : ["islam_words", "atheism_words", "christianity_words"]}
		l: float | Trade-off parameter (1 - Highest level of removal, 0 - lowest)
		nullspace_iterations: int | Number of nullspaces to be included in iterative bias minimization. If -1, all are taken into consideration
		neighb_count: int | Number of neighbors that initial target/subclass lists are expanded on
		'''

		nullspace_dict, neighbors, subclasses, duplicates = {}, {}, [], {} 
		target_words_complete = list(dict.fromkeys([word for class_name in target_at_dict for word in sets[class_name]]))
		substopwords = set(stopwords.words('english')) - set(target_words_complete)
		attribute_sets_complete = set([word for subclass_name in target_at_dict for a_set in target_at_dict[subclass_name]for word in sets[a_set]])

		cs_matrix = cs([self.get_value(word) for word in target_words_complete], self.vectors)
		cs_idx = {word:idx for idx, word in enumerate(target_words_complete)}
		dictionary_categories = {"gender" : ["male_terms", "female_terms"],   "race": ["black_names", "white_names"], "religion" : ["islam_words", "atheism_words", "christianity_words"]}

		#Generating neighbors
		for class_name in dictionary_categories.keys():
			for subclass_name in dictionary_categories[class_name]:

				if subclass_name not in target_at_dict: continue
				
				subclasses.append(subclass_name)
				neighbors[subclass_name] = set(sets[subclass_name])
				for word in sets[subclass_name]:
					new_words = set([self.words[neighbor_idx] for neighbor_idx in (cs_matrix[cs_idx[word],:].argsort()[-neighb_count:]) if cs([self.vectors[neighbor_idx]], [self.get_value(word)]) > 0.6])
					new_words -= set([word for class_name, subclasses in dictionary_categories.items() for subcl in subclasses for word in sets[subcl] if subcl!=subclass_name])
					neighbors[subclass_name] = neighbors[subclass_name].union(new_words)

					#Identifying duplicates
					for word in new_words:
						for subclass in subclasses:
							if word in neighbors[subclass] and subclass!=subclass_name:
								if word not in duplicates:
									duplicates[word] = set()
								duplicates[word].add(subclass)
								duplicates[word].add(subclass_name)
		
		#Removing duplicates
		for word_dup in duplicates:
			word_dup_subclasses = list(duplicates[word_dup])
			cs_subclasses = [cs([np.sum([self.get_value(w) for w in sets[subclass_name]], axis = 0)], [self.get_value(word_dup)]) for subclass_name in word_dup_subclasses]
			for sc in [x for i,x in enumerate(word_dup_subclasses) if i!=np.argmax(cs_subclasses)]:
				neighbors[sc].remove(word_dup)

		for class_name in dictionary_categories.keys():

			#Iterating through target set that might contain bias towards some attribute sets of words 
			for subclass_name in dictionary_categories[class_name]:

				if subclass_name not in target_at_dict.keys(): 
					continue 

				attribute_set_names = sorted(target_at_dict[subclass_name])
				all_words_for_subclass = list( neighbors[subclass_name] - attribute_sets_complete - substopwords)
				mean_value = np.mean([self.get_value(word) for word in all_words_for_subclass], axis=0)
				vectors_for_nullspacing = np.array([self.get_wordset_mean(sets[a_set]) for a_set in attribute_set_names])
				null_space = ns(vectors_for_nullspacing)

				bias_levels_per_nullspace = []
				#print(f'Words for subclass {subclass_name} len: {(all_words_for_subclass)}')

				no_of_iterations = np.size(null_space, 1) if nullspace_iterations==-1 else nullspace_iterations
				for k in range(0, no_of_iterations):

					e = cp(self)
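					# score this candidate nullspace direction on a copy of the embedding, so the original vectors stay untouched until the best direction is chosen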
					nullspace_dict[subclass_name] = null_space[:,k] 
					T = nullspace_dict[subclass_name] -  mean_value
					T = make_translation_matrix(T, l)
					vectors_for_translation = np.vstack([np.transpose([e.get_value(word) for word in all_words_for_subclass]), np.ones((1, len(all_words_for_subclass)))])
					transformed_points = np.matmul(T, vectors_for_translation)
					for i, word in enumerate(all_words_for_subclass):  
						e.vectors[e.get_index_out_of_word(word)] = transformed_points[0:-1,i]
					_, bias_levels_d, _, _, _ = weat_analysis(e, bias_combinations, sets, steps=1000)
					bias_levels_per_nullspace.append(bias_levels_d[class_name])
					del e

				min_nullspace_key = np.argmin(bias_levels_per_nullspace)
				final_t_vector = null_space[:,min_nullspace_key] - mean_value
				T_final = make_translation_matrix(final_t_vector, l)
				vectors_for_translation = np.vstack([np.transpose([self.get_value(word) for word in all_words_for_subclass]), np.ones((1, len(all_words_for_subclass)))])
				transformed_points = np.matmul(T_final, vectors_for_translation)

				for i, word in enumerate(all_words_for_subclass):  
					self.vectors[self.get_index_out_of_word(word)] = transformed_points[0:-1,i]
				
				print(f'Subclass {subclass_name} finished.')
				
		self.normalize_vectors()
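
A translation like the one applied above works in homogeneous coordinates: the selected word vectors are stacked with a row of ones and multiplied by an affine matrix. The short self-contained sketch below illustrates that mechanism; translation_matrix_sketch is only assumed to behave like the snippet's make_translation_matrix, whose exact implementation is not shown here.

import numpy as np

def translation_matrix_sketch(t, l=1.0):
    # (d+1) x (d+1) affine matrix that shifts d-dimensional vectors by l * t
    d = len(t)
    T = np.eye(d + 1)
    T[:d, d] = l * np.asarray(t)
    return T

vectors = np.random.rand(5, 3)                              # five toy 3-d word vectors
t = np.array([0.1, -0.2, 0.05])                             # translation direction
T = translation_matrix_sketch(t, l=1.0)
homog = np.vstack([vectors.T, np.ones((1, len(vectors)))])  # stack with a row of ones, as in the loop above
shifted = (T @ homog)[:-1, :].T                             # drop the homogeneous row and transpose back
assert np.allclose(shifted, vectors + t)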
Exemple #43
0
def encoder(u1,u2,u3,g):        # encoder function to compute F's from U's
    f1 = cp(u2)
    f2 = (3.0-g)/2.0*u2**2/u1 + (g-1.0)*u3
    f3 = g*u2*u3/u1 - u2**3/u1**2/2.0*(g-1.0)

    return f1,f2,f3
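
A complementary decoder, recovering the primitive variables from the conservative U's, could look like the sketch below. It simply inverts the definitions used by these companion snippets (U1 = rho, U2 = rho*u, U3 = rho*E with E = R*T/(g-1) + u**2/2); the default gas constant R = 287.0 matches the value used elsewhere here, and the function itself is an illustration rather than part of the original code.

def decoder(u1, u2, u3, g, R=287.0):
    # recover primitive variables from the conservative ones
    rho = u1
    u = u2 / u1
    E = u3 / u1
    T = (E - u**2 / 2.0) * (g - 1.0) / R
    p = rho * R * T
    return rho, u, T, p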
Exemple #44
0
            outfile.write(infile.read())
            outfile.close()


# Download
sy = ((syear - 1) // 10 - 3) * 10 + 1  # first year of the climatology period to use
if eyear % 10 == 0:
    ey = (eyear // 10 - 1) * 10  # last year of the climatology period to use
else:
    ey = (eyear // 10) * 10  # last year of the climatology period to use

while sy <= ey:
    grib_download(sy)  # download the files used to compute the climatology
    sy += 1

time = cp(syear)
while time <= eyear:
    grib_download(time)  # download the files for the years to extract
    time += 1

if path.exists("auth.rda.ucar.edu"):
    remove("auth.rda.ucar.edu")
else:
    pass

# Build the reference (baseline) values
while syear <= eyear:
    if 'lcl_90' in locals():
        pass
    else:
        print('Making lower confidence limit data...')
Exemple #45
0
def fillin(un, ixbox, iybox):
    border = cp(un[ixbox[0] - 1, iybox[0]])
    for ix in ixbox:
        for iy in iybox:
            un[ix, iy] = border
    return un
Exemple #46
0
def clones(module, N):

    return nn.ModuleList([cp(module) for _ in range(N)])
Exemple #47
0
    def __call__(self, img):

        mylog = mylogger.logging.getLogger("PyBDSM." + img.log + "Wavelet")

        if img.opts.atrous_do:
            if img.nisl == 0:
                mylog.warning(
                    "No islands found. Skipping wavelet decomposition.")
                img.completed_Ops.append('wavelet_atrous')
                return

            mylog.info(
                "Decomposing gaussian residual image into a-trous wavelets")
            bdir = img.basedir + '/wavelet/'
            if img.opts.output_all:
                if not os.path.isdir(bdir): os.makedirs(bdir)
                if not os.path.isdir(bdir + '/residual/'):
                    os.makedirs(bdir + '/residual/')
                if not os.path.isdir(bdir + '/model/'):
                    os.makedirs(bdir + '/model/')
            dobdsm = img.opts.atrous_bdsm_do
            filter = {
                'tr': {
                    'size': 3,
                    'vec': [1. / 4, 1. / 2, 1. / 4],
                    'name': 'Triangle'
                },
                'b3': {
                    'size': 5,
                    'vec': [1. / 16, 1. / 4, 3. / 8, 1. / 4, 1. / 16],
                    'name': 'B3 spline'
                }
            }

            if dobdsm: wchain, wopts = self.setpara_bdsm(img)

            n, m = img.ch0_arr.shape

            # Calculate residual image that results from normal (non-wavelet) Gaussian fitting
            Op_make_residimage()(img)
            resid = img.resid_gaus_arr

            lpf = img.opts.atrous_lpf
            if lpf not in ['b3', 'tr']: lpf = 'b3'
            jmax = img.opts.atrous_jmax
            l = len(filter[lpf]['vec'])  # 1st 3 is arbitrary and 2nd 3 is what's expected for a-trous
            if jmax < 1 or jmax > 15:  # determine jmax
                # Check if largest island size is
                # smaller than 1/3 of image size. If so, use it to determine jmax.
                min_size = min(resid.shape)
                max_isl_shape = (0, 0)
                for isl in img.islands:
                    if isl.image.shape[0] * isl.image.shape[1] > max_isl_shape[
                            0] * max_isl_shape[1]:
                        max_isl_shape = isl.image.shape
                if max_isl_shape != (
                        0, 0) and min(max_isl_shape) < min(resid.shape) / 3.0:
                    min_size = min(max_isl_shape) * 4.0
                else:
                    min_size = min(resid.shape)
                jmax = int(
                    floor(
                        log((min_size / 3.0 * 3.0 - l) /
                            (l - 1) + 1) / log(2.0) + 1.0)) + 1
                if min_size * 0.55 <= (l + (l - 1) * (2**(jmax) - 1)):
                    jmax = jmax - 1
            img.wavelet_lpf = lpf
            img.wavelet_jmax = jmax
            mylog.info("Using " + filter[lpf]['name'] +
                       ' filter with J_max = ' + str(jmax))

            img.atrous_islands = []
            img.atrous_gaussians = []
            img.atrous_sources = []
            img.atrous_opts = []
            img.resid_wavelets_arr = cp(img.resid_gaus_arr)

            im_old = img.resid_wavelets_arr
            total_flux = 0.0
            ntot_wvgaus = 0
            stop_wav = False
            pix_masked = N.where(N.isnan(resid))
            jmin = 1
            if img.opts.ncores is None:
                numcores = 1
            else:
                numcores = img.opts.ncores
            for j in range(jmin, jmax +
                           1):  # extra +1 is so we can do bdsm on cJ as well
                mylogger.userinfo(mylog, "\nWavelet scale #" + str(j))
                im_new = self.atrous(im_old,
                                     filter[lpf]['vec'],
                                     lpf,
                                     j,
                                     numcores=numcores,
                                     use_scipy_fft=img.opts.use_scipy_fft)
                im_new[pix_masked] = N.nan  # since fftconvolve won't work with blanked pixels
                if img.opts.atrous_sum:
                    w = im_new
                else:
                    w = im_old - im_new
                im_old = im_new
                suffix = 'w' + str(j)
                filename = img.imagename + '.atrous.' + suffix + '.fits'
                if img.opts.output_all:
                    func.write_image_to_file('fits', filename, w, img, bdir)
                    mylog.info('%s %s' % ('Wrote ', img.imagename +
                                          '.atrous.' + suffix + '.fits'))

                # now do bdsm on each wavelet image.
                if dobdsm:
                    wopts['filename'] = filename
                    wopts['basedir'] = bdir
                    box = img.rms_box[0]
                    y1 = (l + (l - 1) * (2**(j - 1) - 1))
                    bs = max(5 * y1, box)  # changed from 10 to 5
                    if bs > min(n, m) / 2:
                        wopts['rms_map'] = False
                        wopts['mean_map'] = 'const'
                        wopts['rms_box'] = None
                    else:
                        wopts['rms_box'] = (bs, bs / 3)
                        if hasattr(img, '_adapt_rms_isl_pos'):
                            bs_bright = max(5 * y1, img.rms_box_bright[0])
                            if bs_bright < bs / 1.5:
                                wopts['adaptive_rms_box'] = True
                                wopts['rms_box_bright'] = (bs_bright,
                                                           bs_bright / 3)
                            else:
                                wopts['adaptive_rms_box'] = False
                    if j <= 3:
                        wopts['ini_gausfit'] = 'default'
                    else:
                        wopts['ini_gausfit'] = 'nobeam'
                    wid = (l + (l - 1) * (2**(j - 1) - 1))  # / 3.0
                    b1, b2 = img.pixel_beam()[0:2]
                    b1 = b1 * fwsig
                    b2 = b2 * fwsig
                    cdelt = img.wcs_obj.acdelt[:2]

                    wimg = Image(wopts)
                    wimg.beam = (sqrt(wid * wid + b1 * b1) * cdelt[0] * 2.0,
                                 sqrt(wid * wid + b2 * b2) * cdelt[1] * 2.0,
                                 0.0)
                    wimg.orig_beam = img.beam
                    wimg.pixel_beam = img.pixel_beam
                    wimg.pixel_beamarea = img.pixel_beamarea
                    wimg.log = 'Wavelet.'
                    wimg.basedir = img.basedir
                    wimg.extraparams['bbsprefix'] = suffix
                    wimg.extraparams['bbsname'] = img.imagename + '.wavelet'
                    wimg.extraparams['bbsappend'] = True
                    wimg.bbspatchnum = img.bbspatchnum
                    wimg.waveletimage = True
                    wimg.j = j
                    if hasattr(img, '_adapt_rms_isl_pos'):
                        wimg._adapt_rms_isl_pos = img._adapt_rms_isl_pos

                    self.init_image_simple(wimg, img, w, '.atrous.' + suffix)
                    for op in wchain:
                        op(wimg)
                        gc.collect()
                        if isinstance(op,
                                      Op_islands) and img.opts.atrous_orig_isl:
                            if wimg.nisl > 0:

                                # Find islands that do not share any pixels with
                                # islands in original ch0 image.
                                good_isl = []

                                # Make original rank image boolean; rank counts from 0, with -1 being
                                # outside any island
                                orig_rankim_bool = N.array(img.pyrank + 1,
                                                           dtype=bool)

                                # Multiply rank images
                                old_islands = orig_rankim_bool * (wimg.pyrank +
                                                                  1) - 1

                                # Exclude islands that don't overlap with a ch0 island.
                                valid_ids = set(old_islands.flatten())
                                for idx, wvisl in enumerate(wimg.islands):
                                    if idx in valid_ids:
                                        wvisl.valid = True
                                        good_isl.append(wvisl)
                                    else:
                                        wvisl.valid = False

                                wimg.islands = good_isl
                                wimg.nisl = len(good_isl)
                                mylogger.userinfo(mylog,
                                                  "Number of islands found",
                                                  '%i' % wimg.nisl)

                                # Renumber islands:
                                for wvindx, wvisl in enumerate(wimg.islands):
                                    wvisl.island_id = wvindx

                        if isinstance(op, Op_gausfit):
                            # If opts.atrous_orig_isl then exclude Gaussians outside of
                            # the original ch0 islands
                            nwvgaus = 0
                            if img.opts.atrous_orig_isl:
                                gaul = wimg.gaussians
                                tot_flux = 0.0

                                if img.ngaus == 0:
                                    gaus_id = -1
                                else:
                                    gaus_id = img.gaussians[-1].gaus_num
                                wvgaul = []
                                for g in gaul:
                                    if not hasattr(g, 'valid'):
                                        g.valid = False
                                    if not g.valid:
                                        try:
                                            isl_id = img.pyrank[
                                                int(g.centre_pix[0] + 1),
                                                int(g.centre_pix[1] + 1)]
                                        except IndexError:
                                            isl_id = -1
                                        if isl_id >= 0:
                                            isl = img.islands[isl_id]
                                            gcenter = (int(g.centre_pix[0] -
                                                           isl.origin[0]),
                                                       int(g.centre_pix[1] -
                                                           isl.origin[1]))
                                            if not isl.mask_active[gcenter]:
                                                gaus_id += 1
                                                gcp = Gaussian(
                                                    img, g.parameters[:],
                                                    isl.island_id, gaus_id)
                                                gcp.gaus_num = gaus_id
                                                gcp.wisland_id = g.island_id
                                                gcp.jlevel = j
                                                g.valid = True
                                                isl.gaul.append(gcp)
                                                isl.ngaus += 1
                                                img.gaussians.append(gcp)
                                                nwvgaus += 1
                                                tot_flux += gcp.total_flux
                                            else:
                                                g.valid = False
                                                g.jlevel = 0
                                        else:
                                            g.valid = False
                                            g.jlevel = 0
                                vg = []
                                for g in wimg.gaussians:
                                    if g.valid:
                                        vg.append(g)
                                wimg.gaussians = vg
                                mylogger.userinfo(
                                    mylog, "Number of valid wavelet Gaussians",
                                    str(nwvgaus))
                            else:
                                # Keep all Gaussians and merge islands that overlap
                                tot_flux = check_islands_for_overlap(img, wimg)

                                # Now renumber the islands and adjust the rank image before going to next wavelet image
                                renumber_islands(img)

                    total_flux += tot_flux
                    if img.opts.interactive and has_pl:
                        dc = '\033[34;1m'
                        nc = '\033[0m'
                        print(dc + '--> Displaying islands and rms image...' + nc)
                        if max(wimg.ch0_arr.shape) > 4096:
                            print(dc + '--> Image is large. Showing islands only.' + nc)
                            wimg.show_fit(rms_image=False,
                                          mean_image=False,
                                          ch0_image=False,
                                          ch0_islands=True,
                                          gresid_image=False,
                                          sresid_image=False,
                                          gmodel_image=False,
                                          smodel_image=False,
                                          pyramid_srcs=False)
                        else:
                            wimg.show_fit()
                        prompt = dc + "Press enter to continue or 'q' to stop fitting wavelet images : " + nc
                        answ = raw_input_no_history(prompt)
                        while answ != '':
                            if answ == 'q':
                                img.wavelet_jmax = j
                                stop_wav = True
                                break
                            answ = raw_input_no_history(prompt)
                    if len(wimg.gaussians) > 0:
                        img.resid_wavelets_arr = self.subtract_wvgaus(
                            img.opts, img.resid_wavelets_arr, wimg.gaussians,
                            wimg.islands)
                        if img.opts.atrous_sum:
                            im_old = self.subtract_wvgaus(
                                img.opts, im_old, wimg.gaussians, wimg.islands)
                    if stop_wav == True:
                        break

            pyrank = N.zeros(img.pyrank.shape, dtype=N.int32)
            for i, isl in enumerate(img.islands):
                isl.island_id = i
                for g in isl.gaul:
                    g.island_id = i
                for dg in isl.dgaul:
                    dg.island_id = i
                pyrank[isl.bbox] += N.invert(isl.mask_active) * (i + 1)
            pyrank -= 1  # align pyrank values with island ids and set regions outside of islands to -1
            img.pyrank = pyrank

            pdir = img.basedir + '/misc/'
            img.ngaus += ntot_wvgaus
            img.total_flux_gaus += total_flux
            mylogger.userinfo(mylog,
                              "Total flux density in model on all scales",
                              '%.3f Jy' % img.total_flux_gaus)
            if img.opts.output_all:
                func.write_image_to_file('fits',
                                         img.imagename + '.atrous.cJ.fits',
                                         im_new, img, bdir)
                mylog.info('%s %s' %
                           ('Wrote ', img.imagename + '.atrous.cJ.fits'))
                func.write_image_to_file(
                    'fits', img.imagename + '.resid_wavelets.fits',
                    (img.ch0_arr - img.resid_gaus_arr +
                     img.resid_wavelets_arr), img, bdir + '/residual/')
                mylog.info('%s %s' %
                           ('Wrote ', img.imagename + '.resid_wavelets.fits'))
                func.write_image_to_file(
                    'fits', img.imagename + '.model_wavelets.fits',
                    (img.resid_gaus_arr - img.resid_wavelets_arr), img,
                    bdir + '/model/')
                mylog.info('%s %s' %
                           ('Wrote ', img.imagename + '.model_wavelets.fits'))
            img.completed_Ops.append('wavelet_atrous')
Exemple #48
0
# this is the python script for plotting the data
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from copy import copy as cp

# reading the data from file
cols = ["X", "Y", "U", "V", "P"]
fid = pd.read_csv("Data.csv", header=None, skiprows=1, names=cols)

nx = 129
ny = 129

# reconstructing the matrix
X = np.zeros([ny, nx], dtype=float)
Y = cp(X)
U = cp(X)
V = cp(X)
P = cp(X)
count = 0
for i in range(nx):
    for j in range(ny):
        X[j, i] = fid["X"][count]
        Y[j, i] = fid["Y"][count]
        U[j, i] = fid["U"][count]
        V[j, i] = fid["V"][count]
        P[j, i] = fid["P"][count]
        count += 1

# plotting contours
fig1 = plt.figure()
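
The nested loop above can also be written as a reshape, provided the CSV rows are ordered exactly as the loop assumes (the j index varying fastest within each i); a hedged equivalent using the snippet's own names:

# equivalent vectorized reconstruction (same row-ordering assumption as the loop above)
X = fid["X"].to_numpy().reshape(nx, ny).T
Y = fid["Y"].to_numpy().reshape(nx, ny).T
U = fid["U"].to_numpy().reshape(nx, ny).T
V = fid["V"].to_numpy().reshape(nx, ny).T
P = fid["P"].to_numpy().reshape(nx, ny).T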
Exemple #49
0
plt.subplots_adjust(wspace = 0.25,hspace=0.25)
lambds = [0.05,0.5,1.0,3.0]

axes = axes.flatten()
for i in range(4):
    numIt = 600  # number of iterations
    delta = 0.01  # adjustment step size
    wlog = np.zeros((numIt,M))  # record the evolution of the weights
    weights = np.zeros(M)  # coefficient vector
    lambd = lambds[i]

    for it in range(1,numIt):
        Lmin = {'value':np.inf,'loc':np.nan,'sign':np.nan}  # track the minimum objective value in this iteration
        for m in range(M-1,0,-1):
            for sign in (-1,1):
                wbak = cp(weights)
                wbak[m] += delta*sign
                Lcur = np.linalg.norm(values-np.dot(datas,wbak),2)+ lambd*np.linalg.norm(wbak,1)
                #print m,sign,Lcur
                if Lmin['value'] > Lcur:  # if the objective is smaller than the current best
                    Lmin['value'] = Lcur
                    Lmin['loc'] = m
                    Lmin['sign'] = sign
        weights[Lmin['loc']] += delta*Lmin['sign']
        wlog[it,:] = weights[:]
    ax = axes[i]
    for m in range(M):
        ax.plot(wlog[:,m])
    ax.set_title('lambda=' + str(lambd), {'fontname':'STFangsong','fontsize':10})
    ax.set_xlabel(u'迭代次数', {'fontname':'STFangsong','fontsize':10})  # x label: number of iterations
    ax.set_ylabel(u'各权值系数', {'fontname':'STFangsong','fontsize':10})  # y label: weight coefficients
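
The inner loop above is a greedy, stagewise lasso search: each iteration perturbs one coefficient by plus or minus delta and keeps the move that most reduces the penalized objective. Restated as a standalone helper (X and y stand for the snippet's datas and values; this only repackages the quantity already computed inside the loop):

import numpy as np

def lasso_objective(w, X, y, lambd):
    # ||y - X w||_2 + lambd * ||w||_1, the quantity minimized by the loop above
    return np.linalg.norm(y - X @ w, 2) + lambd * np.linalg.norm(w, 1)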
Exemple #50
0
gamma = 1.4  # ratio of specific heats
R = 287.0  # gas constant
SimTime = 1.0  # simulation time in seconds

# computation variables section----------------------------------------
Uin = Mach * np.sqrt(gamma * R * T0)  # inlet velocity
dt = 0.5 * dx / Uin  # time step size

rho = np.linspace(rho0, 0.5 * rho0, nx)
u = np.linspace(Uin, 0.0, nx)  # creating initial field
T = np.linspace(T0, 0.8 * T0, nx)

E = R * T / (gamma - 1.0) + u**2 / 2.0  # combining thermal variables
H = E + R * T

U1 = cp(rho)  # initializing conservative variables
U2 = rho * u
U3 = rho * E

F1 = cp(U2)  # initializing x-flux variables
F2 = (3.0 - gamma) / 2.0 * U2**2 / U1 + (gamma - 1.0) * U3
F3 = gamma * U2 * U3 / U1 - U2**3 / 2.0 / U1**2 * (gamma - 1.0)

U1b = cp(U1)
F1b = cp(F1)  # initializing correctors
U2b = cp(U2)
F2b = cp(F2)
U3b = cp(U3)
F3b = cp(F3)

dU1 = np.zeros(nx, dtype=float)
Exemple #51
0
def run_all_regressions(x_train, y_train, regs=0, error_func=mean_squared_error, x_test=None, y_test=None, selection_algo=None, verbose=True, show=False, final_verbose=range(10), final_show=False, sort_key=None, seed=None, split_func=train_test_split, debug=False, save_all_fit_regs=False):
    """
    ********* Description *********
    Try several different regressions; optionally plot (show) and print (verbose) some of them
    ********* Params *********
    x_train : (np.ndarray(n, dx)) : points
    y_train : (np.ndarray(n, dy)) : targets
    regs : (int) or (str) or [(sklearn.regression)] : regressions used, with get_regressions syntax
    error_func : (func) = sklearn.mean_squared_error : the error used
    x_test : np.ndarray(m, dx) or (int) or (float) = None : test points, or
        indication to use K-fold separation on x_train, 
        more precisely if (int) then the train is on (n-x_test) points, 
        and if (float) then the train is on (n*(1-x_test)) points
        if None we don't compute test error
    y_test : np.ndarray(m, dx) = None : test target
    selection_algo : (MAB class) = None : Rules the run sequence of regressions, cf multi_armed_bandit
    verbose : (bool) or [(bool)] or [(int)] = True : whether we print the regression errors
    show : (bool) or [(bool)] or [(int)] = False : whether we plot the regressions
    final_verbose : (bool) or [(bool)] or [(int)] = range(10) : same as verbose but for the final regression ranking
    final_show : (bool) or [(bool)] or [(int)] = False : same as show but for the final regression ranking
    sort_key : (lambda reg -> float) = lambda x:x["error_test"] : key used to rank the regressions at the end
    ********* Return *********
    error of regressions tested
    ********* Examples *********
    x, y = load_dataset("boston")
    errors = run_all_regressions(x, y)
    errors = run_all_regressions(x, y, x_test=0.1)
    errors = run_all_regressions(x, y, x_test=0.1, final_verbose=range(3))
    errors = run_all_regressions(x, y, x_test=0.1, final_verbose=[True, True, True])
    errors = run_all_regressions(x, y, x_test=0.1, verbose=False)
    sel = Uniform_MAB(1, 100) # Will run 100 tests
    errors = run_all_regressions(x, y, x_test=0.1, verbose=True, selection_algo=sel)
    sel = Uniform_MAB(1, None, 8) # Will run during 8 seconds
    errors = run_all_regressions(x, y, x_test=0.1, verbose=False, selection_algo=sel)
    """
    # We define sort_key
    if (sort_key is None):
        sort_key = lambda x: (x["error_train"] if (x["error_test"] is None) else x["error_test"])
    # We define regs
    if isinstance(regs, int) or isinstance(regs, str):
        regs = get_regressions(regs)
    # We properly define show, ie it will be a list of bool
    show = _verbose_show_proper(len(regs), show)
    verbose = _verbose_show_proper(len(regs), verbose)
    # We properly define test_size
    if isinstance(x_test, int):
        test_size = float(x_test)/x_train.shape[0]
    elif isinstance(x_test, float):
        test_size=x_test
    else:
        test_size=None
    # We run all the regressions following selection_algo
    if any(verbose):
        print("\n\n")
    nbr_ex = 0
    start_time = time.time()
    if selection_algo is None:
        # In this section there is no particular reg selection
        # We split the data into train and test sets if requested
        if x_test is None:
            x_tr, x_te, y_tr, y_te = (x_train, x_test, y_train, y_test)
        else:
            x_tr, x_te, y_tr, y_te = split_func(x_train, y_train, test_size=test_size, random_state=seed)
        # We try over all regressions
        errors = []
        for ic, sho, verb, reg in zip(range(len(show)), show, verbose, regs):
            nbr_ex += 1
            tr, te, ti = _run_one_regression(x_tr, y_tr, reg, error_func, x_te, y_te, verbose=verb, show=sho, i=ic, debug=debug)
            errors.append({"i":ic, "error_train":tr, "error_test":te, "reg":reg, "time":ti})
    else:
        # In this section we follow the class selection_algo to perform the regressions tests
        selection_algo.set(n_arms=len(regs))
        arm = selection_algo.next_arm()
        if seed is None:
            sd = np.random.randint(1000000)
        else:
            sd = seed+0
        while (arm is not None):
            # We split the data into train and test sets if requested
            n_draw = len(selection_algo.list_rewards[arm])
            if x_test is None:
                x_tr, x_te, y_tr, y_te = (x_train, x_test, y_train, y_test)
            else:
                x_tr, x_te, y_tr, y_te = split_func(x_train, y_train, test_size=test_size, random_state=sd+n_draw)
            tr, te, ti = _run_one_regression(x_tr, y_tr, regs[arm], error_func, x_te, y_te, verbose[arm], show[arm], i=nbr_ex, debug=debug)
            if save_all_fit_regs:
                selection_algo.update_reward(te, arm=arm, other_data=(tr, ti, sd+n_draw, cp(regs[arm])))
            else:
                selection_algo.update_reward(te, arm=arm, other_data=(tr, ti))
            arm = selection_algo.next_arm()
            nbr_ex += 1
        errors = []
        for ic, tri, te, reg in zip(range(len(regs)), selection_algo.other_data, selection_algo.mean_rewards, regs):
            # If an experiment failed, we set the mean to None
            tr = [j[0] for j in tri]
            if tr and (None not in tr):
                mean_err_tr = np.mean(tr)
            else:
                mean_err_tr = None
            ti = [j[1] for j in tri]
            if ti and (None not in ti):
                mean_err_ti = np.mean(ti)
            else:
                mean_err_ti = None
            if save_all_fit_regs:
                nd = [j[2] for j in tri]
                rg = [j[3] for j in tri]
                errors.append({"i":ic, "error_train":mean_err_tr, "error_test":te, "reg":reg, "seeds":nd, "regs":rg, "time":mean_err_ti})
            else:
                errors.append({"i":ic, "error_train":mean_err_tr, "error_test":te, "reg":reg, "time":mean_err_ti})
    # Now we have finished the tests of the regressions
    # We print the final results obtained
    final_show = _verbose_show_proper(nbr_ex, final_show)
    final_verbose = _verbose_show_proper(nbr_ex, final_verbose)
    if any(verbose) or any(final_verbose):
        print("\nFinished running {} examples in {} seconds\n".format(nbr_ex, time.time() - start_time))
    if any(final_verbose) or any(final_show):
        errors_sorted = [e for e in errors if e["time"] is not None]
        errors_sorted = sorted(errors_sorted, key=sort_key)
        for ic, ss, sho, verb in zip(range(len(final_show)), errors_sorted, final_show, final_verbose):
             if verb or sho:
                 _run_one_regression(x_train, y_train, ss["reg"], error_func, verbose=verb, show=sho, i=ic, debug=debug, _error_test=ss["error_test"], _run_time=ss["time"])
    return errors
Exemple #52
0
def get_dirs_check_reconalled(output_dir, subject_list, session_list):
    """
    Get the info from subjects_visits_tsv, such as subject_dir, subject_id,
    subject_list and session_list. This function also handles reruns of the
    dataset: if a subject's result folder already exists and recon-all
    finished without error, that subject is skipped; otherwise it is kept
    for (re)processing.

    Args:
        output_dir: CAPS directory to contain the output
        subject_list:  a list containing all the participant_id
        session_list: a list containing all the session_id

    Returns: the related lists based on the tsv files
    """
    import os
    import errno
    from copy import deepcopy as cp
    import subprocess
    from clinica.utils.stream import cprint

    # subject_id, subject_list and session_list
    subject_id = list(subject_list[i] + '_' + session_list[i]
                      for i in range(len(subject_list)))
    subject_id_without_reconalled = cp(subject_id)
    subject_list_without_reconalled = cp(subject_list)
    session_list_without_reconalled = cp(session_list)

    # output_path is the path to CAPS
    output_path = os.path.expanduser(
        output_dir)  # change the relative path to be absolute path
    output_dir = os.path.join(output_path, 'subjects')

    try:
        os.makedirs(output_dir)
    except OSError as exception:
        if exception.errno != errno.EEXIST:  # re-raise unless the directory already exists
            raise

    # subject_dir is the real path to FreeSurfer output path
    subject_dir = []
    subject_dir_without_reconalled = []

    for i in range(len(subject_list)):
        subject = os.path.join(output_dir, subject_list[i], session_list[i],
                               't1', 'freesurfer_cross_sectional')
        try:
            os.makedirs(subject)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

        # add the FreeSurfer path for every subject and check the recon-all.log file to see whether recon-all has already completed successfully for this subject.
        subject_dir.append(subject)
        subject_path = os.path.join(subject, subject_id[i])
        subject_path_abs = os.path.expanduser(subject_path)
        # check the recon-all.log
        log_file = os.path.join(subject_path_abs, 'scripts', 'recon-all.log')
        if os.path.isfile(log_file):
            last_line = subprocess.check_output(['tail', '-1', log_file])
            if b'finished without error' in last_line:
                cprint(
                    "Skipping %s (FreeSurfer segmentation without error was found)"
                    % subject_id[i])
                subject_id_without_reconalled.remove(subject_id[i])
                subject_list_without_reconalled.remove(subject_list[i])
                session_list_without_reconalled.remove(session_list[i])
            else:
                subject_dir_without_reconalled.append(subject)
        else:
            subject_dir_without_reconalled.append(subject)

    return subject_dir, subject_id, subject_dir_without_reconalled, subject_id_without_reconalled, subject_list_without_reconalled, session_list_without_reconalled
Exemple #53
0
def main(conf):
    if conf['method'] not in ['baseline', 'LmixLact', 'Lmix']:
        raise ValueError("method must be baseline, LmixLact or Lmix")

    # Set random seeds both for pytorch and numpy
    th.manual_seed(conf['seed'])
    np.random.seed(conf['seed'])

    # Create experiment folder and save conf file with the final configuration
    os.makedirs(conf['exp_dir'], exist_ok=True)
    conf_path = os.path.join(conf['exp_dir'], 'conf.yml')
    with open(conf_path, 'w') as outfile:
        yaml.safe_dump(conf, outfile)

    # Load test set. Be careful about is_wav!
    test_set = musdb.DB(root=conf['musdb_path'], subsets=["test"], is_wav=True)

    # Randomly choose the indexes of sentences to save.
    ex_save_dir = os.path.join(conf['exp_dir'], 'examples/')
    if conf['n_save_ex'] == -1:
        conf['n_save_ex'] = len(test_set)
    save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])

    # If stop_index==-1, evaluate the whole test set
    if conf['stop_index'] == -1:
        conf['stop_index'] = len(test_set)

    # prepare data frames
    results_applyact = museval.EvalStore()
    results_adapt = museval.EvalStore()
    silence_adapt = pd.DataFrame({
        'target': [],
        'PES': [],
        'EPS': [],
        'track': []
    })

    # Loop over test examples
    for idx in range(len(test_set)):
        torch.set_grad_enabled(False)
        track = test_set.tracks[idx]
        print(idx, str(track.name))

        # Create local directory
        local_save_dir = os.path.join(ex_save_dir, str(track.name))
        os.makedirs(local_save_dir, exist_ok=True)

        # Load mixture
        mix = th.from_numpy(track.audio).t().float()
        ref = mix.mean(dim=0)  # mono mixture
        mix = (mix - ref.mean()) / ref.std()

        # Load pretrained model
        klass, args, kwargs, state = torch.load(conf['model_path'], 'cpu')
        model = klass(*args, **kwargs)
        model.load_state_dict(state)

        # Handle device placement
        if conf['use_gpu']:
            model.cuda()
        device = next(model.parameters()).device

        # Create references matrix
        references = th.stack([
            th.from_numpy(track.targets[name].audio) for name in source_names
        ])
        references = references.numpy()

        # Get activations
        H = []
        for name in source_names:
            audio = track.targets[name].audio
            H.append(audio)
        H = np.array(H)
        _, bn_ch1, _ = compute_activation_confidence(H[:, :, 0],
                                                     theta=conf['th'],
                                                     hilb=False)
        _, bn_ch2, _ = compute_activation_confidence(H[:, :, 1],
                                                     theta=conf['th'],
                                                     hilb=False)
        activations = th.from_numpy(np.stack((bn_ch1, bn_ch2), axis=2))

        # FINE TUNING
        if conf['method'] != 'baseline':
            print('ADAPTATION')
            torch.set_grad_enabled(True)

            # Freeze layers
            freeze(model.encoder)
            freeze(model.separator, n=conf['frozen_layers'])
            if conf['freeze_decoder']:
                freeze(model.decoder)

            # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=conf['lr_fine'])
            optimizer = Ranger(filter(lambda p: p.requires_grad,
                                      model.parameters()),
                               lr=conf['lr_fine'])
            loss_func = nn.L1Loss()

            # Initialize writer for Tensorboard
            writer = SummaryWriter(log_dir=local_save_dir)
            for epoch in range(conf['epochs_fine']):
                total_loss = 0
                epoch_loss = 0

                total_rec = 0
                epoch_rec = 0
                total_act = 0
                epoch_act = 0

                if conf['monitor_metrics']:
                    total_sdr = dict([(key, 0) for key in source_names])
                    epoch_sdr = dict([(key, 0) for key in source_names])

                    total_sir = dict([(key, 0) for key in source_names])
                    epoch_sir = dict([(key, 0) for key in source_names])

                    total_sar = dict([(key, 0) for key in source_names])
                    epoch_sar = dict([(key, 0) for key in source_names])

                    total_isr = dict([(key, 0) for key in source_names])
                    epoch_isr = dict([(key, 0) for key in source_names])

                # Data loader with optional data augmentation
                mix_set = DAdataloader(mix.numpy(),
                                       win=conf['win_fine'],
                                       hop=conf['hop_fine'],
                                       sample_rate=conf['sample_rate'],
                                       n_observations=conf['n_observations'],
                                       pitch_list=conf['pitch_list'],
                                       min_semitones=conf['min_semitones'],
                                       max_semitones=conf['max_semitones'],
                                       same_pitch_list_all_chunks=conf[
                                           'same_pitch_list_all_chunks'])

                # Iterate over chunks
                for t, item in enumerate(mix_set):
                    sample, win, _ = item
                    mix_chunk = th.from_numpy(sample[None, :, :]).to(device)
                    est_chunk = model(cp(mix_chunk))

                    act_chunk = activations[None, :,
                                            win, :].transpose(3, 2).to(device)
                    loss_act = loss_func(est_chunk * (1 - act_chunk),
                                         torch.zeros_like(est_chunk))
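                    # loss_act penalizes estimated source energy in frames where the activation mask says that source is inactive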

                    if conf['method'] == 'LmixLact':
                        loss_rec = loss_func(
                            mix_chunk, torch.sum(est_chunk * act_chunk, dim=1))
                        loss = loss_rec + conf['gamma'] * loss_act

                    if conf['method'] == 'Lmix':
                        loss_rec = loss_func(mix_chunk,
                                             torch.sum(est_chunk, dim=1))
                        loss = loss_rec

                    loss.backward()
                    optimizer.step()
                    optimizer.zero_grad()

                    total_loss += loss.item()
                    epoch_loss = total_loss / (1 + t)

                    total_rec += loss_rec.item()
                    total_act += loss_act.item()

                    epoch_rec = total_rec / (1 + t)
                    epoch_act = total_act / (1 + t)

                    # Monitor sdr, sir, and sar over epochs
                    if conf['monitor_metrics']:
                        ref_chunk = references[:, win, :]
                        skip = False
                        for i, target in enumerate(source_names):
                            if np.sum(ref_chunk[i, :, :]**2) == 0:
                                skip = True
                        if not skip:
                            sdr, isr, sir, sar = museval.evaluate(
                                ref_chunk,
                                est_chunk.squeeze().transpose(
                                    1, 2).detach().cpu().numpy(),
                                win=np.inf)

                            sdr = np.array(sdr)
                            sir = np.array(sir)
                            sar = np.array(sar)
                            isr = np.array(isr)

                            for i, target in enumerate(source_names):
                                total_sdr[target] += sdr[i]
                                epoch_sdr[target] = total_sdr[target] / (1 + t)

                                total_sir[target] += sir[i]
                                epoch_sir[target] = total_sir[target] / (1 + t)

                                total_sar[target] += sar[i]
                                epoch_sar[target] = total_sar[target] / (1 + t)

                                total_isr[target] += isr[i]
                                epoch_isr[target] = total_isr[target] / (1 + t)

                if conf['monitor_metrics']:
                    for i, target in enumerate(source_names):
                        writer.add_scalar("SDR/" + target, epoch_sdr[target],
                                          epoch)
                        writer.add_scalar("SIR/" + target, epoch_sir[target],
                                          epoch)
                        writer.add_scalar("SAR/" + target, epoch_sar[target],
                                          epoch)
                        writer.add_scalar("ISR/" + target, epoch_isr[target],
                                          epoch)

                writer.add_scalar("Loss/total", epoch_loss, epoch)
                writer.add_scalar("Loss/rec", epoch_rec, epoch)
                writer.add_scalar("Loss/act", epoch_act, epoch)
                print('epoch, nr of training examples and loss: ', epoch, t,
                      epoch_loss, epoch_rec, epoch_act, epoch_sdr['other'])

            writer.flush()
            writer.close()

        # apply model
        print('Apply model')
        estimates = apply_model(model,
                                mix.to(device),
                                shifts=conf['shifts'],
                                split=conf['split'])
        estimates = estimates * ref.std() + ref.mean()
        estimates = estimates.transpose(1, 2).cpu().numpy()

        # get results of this track
        print('Evaluate model')
        assert references.shape == estimates.shape
        track_store, silence_frames = evaluate_mia(ref=references,
                                                   est=estimates,
                                                   track_name=track.name,
                                                   source_names=source_names,
                                                   eval_silence=True,
                                                   conf=conf)

        # aggregate results over the track and save the partials
        silence_adapt = silence_adapt.append(silence_frames, ignore_index=True)
        silence_adapt.to_json(os.path.join(conf['exp_dir'], 'silence.json'),
                              orient='records')

        results_adapt.add_track(track_store)
        results_adapt.save(os.path.join(conf['exp_dir'],
                                        'bss_eval_tracks.pkl'))
        print(results_adapt)

        # Save some examples with corresponding metrics in a folder
        if idx in save_idx:
            silence_frames.to_json(os.path.join(local_save_dir,
                                                'silence_frames.json'),
                                   orient='records')
            with open(os.path.join(local_save_dir, 'metrics_museval.json'),
                      'w+') as f:
                f.write(track_store.json)
            sf.write(os.path.join(local_save_dir, "mixture.wav"),
                     mix.transpose(0, 1).cpu().numpy(), conf['sample_rate'])
            for name, estimate, reference, activation in zip(
                    source_names, estimates, references, activations):
                print(name)

                unique, counts = np.unique(activation, return_counts=True)
                print(dict(zip(unique, counts / (len(activation) * 2) * 100)))

                assert estimate.shape == reference.shape
                sf.write(os.path.join(local_save_dir, name + "_est.wav"),
                         estimate, conf['sample_rate'])
                sf.write(os.path.join(local_save_dir, name + "_ref.wav"),
                         reference, conf['sample_rate'])
                sf.write(os.path.join(local_save_dir, name + "_act.wav"),
                         activation.cpu().numpy(), conf['sample_rate'])

        # Evaluate results when applying the activations to the output
        if conf['apply_act_output']:
            track_store_applyact, _ = evaluate_mia(ref=references,
                                                   est=estimates *
                                                   activations.cpu().numpy(),
                                                   track_name=track.name,
                                                   source_names=source_names,
                                                   eval_silence=False,
                                                   conf=conf)

            # aggregate results over the track and save the partials
            results_applyact.add_track(track_store_applyact)
            print('after applying activations')
            print(results_applyact)

            results_applyact.save(
                os.path.join(conf['exp_dir'], 'bss_eval_tracks_applyact.pkl'))

            # Save some examples with corresponding metrics in a folder
            if idx in save_idx:
                with open(
                        os.path.join(local_save_dir,
                                     'metrics_museval_applyact.json'),
                        'w+') as f:
                    f.write(track_store_applyact.json)

            del track_store_applyact

        # Delete some variables
        del references, mix, estimates, track, track_store, silence_frames, model

        # Stop if reached the limit
        if idx == conf['stop_index']:
            break

        print('------------------')

    # Print and save aggregated results
    print('Final results')
    print(results_adapt)
    method = museval.MethodStore()
    method.add_evalstore(results_adapt, conf['exp_dir'])
    method.save(os.path.join(conf['exp_dir'], 'bss_eval.pkl'))

    if conf['eval_silence']:
        print(
            "mean over evaluation frames, mean over channels, mean over tracks"
        )
        for target in source_names:
            print(
                target + ' ==>',
                silence_adapt.loc[silence_adapt['target'] == target].mean(
                    axis=0, skipna=True))
        silence_adapt.to_json(os.path.join(conf['exp_dir'], 'silence.json'),
                              orient='records')

    print('Final results apply act')
    print(results_applyact)
    method = museval.MethodStore()
    method.add_evalstore(results_applyact, conf['exp_dir'])
    method.save(os.path.join(conf['exp_dir'], 'bss_eval_applyact.pkl'))
Exemple #54
0
def openSeam(sm, dr, cost, qbits, paths, qb_conf, pt_conf, tg_fn, par):
    ''' recursively open seam'''

    global _cells, _numAdj, M, N, L, _qbitAdj, _cell_flags, _qubits, _reserved
    ## erase conflicts

    log('Broken qubits: \n %s \n\n' % str(qb_conf))

    # erase qbit conflicts and update broken paths
    log('Erasing %d broken qubits...\n' % len(qb_conf))
    cell_conf = map(lambda x: _cells[x], qb_conf)
    for qb in qb_conf:
        new_paths = forgetQubit(qb)
        log('%s: %s\n' % (str(qb), str(new_paths)))
        pt_conf.update(new_paths)
        for pt in new_paths:
            if pt in paths:
                paths.pop(pt)
    log(' done\n')

    # erase path conflicts
    log('Erasing %d broken paths...\n' % len(pt_conf))
    for path in pt_conf:
        log('path: %s\n' % str(path))
        forgetPath(path)
    log(' done\n')

    # retain path keys between good qbits
    pt_rp = []
    for key in pt_conf:
        if _cell_flags[key[0]]['placed'] and _cell_flags[key[1]]['placed']:
            pt_rp.append(key)

    ## STORE OLD VALUES AND WIPE SEAM TARGET REGION

    # map of new qbits for each cell
    qbit_dict = {_cells[qb]: tg_fn(qb) for qb in qbits}

    # map of new paths
    path_dict = {key: newPath(key, paths[key], tg_fn) for key in paths}

    # wipe old qubits and paths
    log('Wiping old values...\n')
    log('\t qbits...')
    for qb in qbits:
        forgetQubit(qb, False)
    log('done\n')
    log('\t paths...')
    for key in paths:
        forgetPath(key, False)
    log('done\n')

    # assign new qubits and paths
    log('Assigning new values...\n')
    for cell in qbit_dict:
        assignQubit(cell, qbit_dict[cell])

    assignPaths(path_dict.values())

    # update all reserved qubits
    log('updating all reservations...\n')
    reserveQubits(filter(None, _qubits.values()))

    ## repair broken paths

    # only place paths between moved qubits
    log('Attempting to replace broken routes between moved qubits\n')
    routes = []
    for pt in pt_rp:
        rt = map(lambda x: _qubits[x], pt)
        routes.append(rt)
    cost = Routing.Routing(routes, _reserved, writePath=ROUTE_PATH)

    # check successful routing
    if cost >= Routing.COST_BREAK:
        log('routing failed...\n')
        raise KeyError('Routing failed for paths between moved qbits in \
        seam opening... fix code later')

    # get paths
    log('Routing successful, preparing to assign new paths\n')
    fixed_paths = cp(Routing.getPaths().values())

    # disable path qubits
    log('Disabling new paths\n')
    qbs = list(set([it for path in fixed_paths for it in path]))
    Routing.disableQubits(qbs)

    # assign paths and update reservations
    log('Assigning new paths \n')
    assignPaths(fixed_paths)

    ## repair qbit placements, should automatically deal with paths

    # order qbits to be placed by decreasing adjacency
    log('Preparing new qbit placements \n')
    cell_ord = sorted(cell_conf, key=lambda x: -_numAdj[x])

    for cell in cell_ord:
        log('cell: %s ...' % str(cell))
        new_qb, new_paths = placeCell(cell)  # recursive call

        # abort on failed placement
        if new_qb is None:
            return False
            raise KeyError('Failed cell %s placement in seam \
            opening' % str(cell))

        # assign qubit and paths
        assignQubit(cell, new_qb)
        assignPaths(new_paths)

        # handle reservations
        reserveQubits([new_qb])

    return True
Exemple #55
0
                                    colors_wERA[:-1]):
            ax.errorbar(bi,
                        mea,
                        yerr=np.vstack([ye]).T,
                        c=col,
                        capsize=5,
                        linewidth=2)

        #filt = (bias != 0.)
        filt = ~np.isnan(bias)
        rcorr = ctl.Rcorr(bias[filt], allpercs['mean'][:-1][filt])
        pears, pval = stats.pearsonr(bias[filt], allpercs['mean'][:-1][filt])
        rcorrsall[(biasnam, nam)] = rcorr
        pearsall[(biasnam, nam)] = (pears, pval)

        filt2 = cp(filt)
        filt2[2] = False
        filt2[3] = False
        pears, pval = stats.pearsonr(bias[filt2], allpercs['mean'][:-1][filt2])
        pears_nocmcc[(biasnam, nam)] = (pears, pval)

        biasoutli = abs(bias - np.nanmean(bias)) > 2 * np.nanstd(bias)
        metroutli = abs(allpercs['mean'][:-1] -
                        np.nanmean(allpercs['mean'][:-1])) > 2 * np.nanstd(
                            allpercs['mean'][:-1])
        if 'res' not in biasnam:
            filt3 = (filt) & ~(biasoutli) & ~(metroutli)
        else:
            filt3 = (filt) & ~(metroutli)
        print(nam, np.sum(filt3), np.sum(filt), filt3[2:4])
        pears, pval = stats.pearsonr(bias[filt3], allpercs['mean'][:-1][filt3])
Exemple #56
0
import pandas as pd
import matplotlib.pyplot as plt
from copy import copy as cp

# simulation parameters definition
n = 51
a = 0.5
nu = 0.1
length = 1.0
Time = 10.0
dt = 1e-3

# initial conditions
U = np.zeros(n)
U[0] = 1.0
Unew = cp(U)
dx = length / float(n - 1)
X = np.linspace(0, length, n)

# creating lists
Uin = []
Uout = []

# matrix formation
mat = np.zeros([n - 2, n])

A = -a / 2.0 / dx - nu / dx**2
B = -1 / dt + 2.0 * nu / dx**2
C = a / 2.0 / dx - nu / dx**2

for i in range(n - 2):
Exemple #57
0
signals['close'] = instr['Close'].rolling(window=1, min_periods=1, center=False).mean()
# Create short simple moving average over the short window
signals['short_mavg'] = instr['Close'].rolling(window=short_window, min_periods=1, center=False).mean()

# Create long simple moving average over the long window
signals['long_mavg'] = instr['Close'].rolling(window=long_window, min_periods=1, center=False).mean()

# Create signals
signals['signal'][short_window:] = np.where(signals['short_mavg'][short_window:]
                                            > signals['long_mavg'][short_window:], 1.0, 0.0)


#print
size = len(signals['close'])
capital = 5000.0
icapital = cp(capital)
buy_flag = True
sell_flag = False
step_sell = 0
step_buy = 0
step_buy_th =  -1
step_sell_th = 6
#step_sell_th =-1
if fprint:
	print()
	print('Date\tClose\tShort_avg\tLong_avg')
for i in range(size):
	date = signals.index[i]
	close = round(signals['close'][i], 2)
	short_mavg= round(signals['short_mavg'][i], 3)
	long_mavg= round(signals['long_mavg'][i], 3)
Exemple #58
0
# computation variables section-------------------------------------------------
Uin = Mach * np.sqrt(g * R * T0)  # inlet velocity

rho = np.linspace(rho0, 0.5 * rho0, nx)  # initializing computational values
u = np.linspace(Uin, 0.5 * Uin, nx)
T = np.linspace(T0, 0.5 * T0, nx)
P = rho * R * T

fid1 = pd.DataFrame({"X":X,"rho":rho,"u":u,"T":T,"P":P},\
                    columns=["X","rho","u","T","P"])
fid1.to_csv("Initial_data.csv", index=None)

dt = 0.05 * dx / Uin  # time step, 0.05 to prevent overflow

U1 = cp(rho)  # building conservative variables
U2 = rho * u
E = R / (g - 1) * T + u**2 / 2
U3 = rho * E

F1, F2, F3 = sc.encoder(U1, U2, U3, g)  # encoding to flux variables

dU11 = np.zeros(nx)
dU12 = cp(dU11)
dU13 = cp(dU11)
dU21 = np.zeros(nx)
dU22 = cp(dU21)
dU23 = cp(dU21)
dU31 = np.zeros(nx)
dU32 = cp(dU31)
dU33 = cp(dU31)
Exemple #59
0
	def hard_weat(self, bias_levels, bias_combinations, subspace_words, sets, neighbors_threshold=1):
		'''
		HardWEAT Debiasing

		Parameters
		----------
		bias_levels: dict | Bias levels for each class respectively
		bias_combinations: dict | Classes and respective subclasses to be included in debiasing, by default in the following form: {"gender" : ["male_terms", "female_terms"],   "race": ["black_names", "white_names"], "religion" : ["islam_words", "atheism_words", "christianity_words"]}
		subspace_words: set | Words within the default combination dictionary
		sets: dict | Existing attribute and target sets of words
		neighbors_threshold: float | Cosine similarity threshold for the equidistancing phase
		'''
		def_vectors, subcategories_vectors, r_cat = {}, {}, 1e-19
		temp_sets = cp(sets)
		def_sets = get_hardweat_sets()

		def_vectors = {bias_category: self.get_center_vector(def_sets[bias_category]) for bias_category in bias_combinations}
		centroid = generate_centroid(scale_bias(bias_levels), def_vectors)
		neutral_words = list(set(self.words) - subspace_words)

		start = time.time()
		print(f'Start of neutralization, there is total of {len(neutral_words)} neutral words out of total {len(self.words)} words.')
		neutral_indices = [self.word_idx[word] for word in neutral_words]
		self.vectors[neutral_indices] = neutralize_vectors(self.vectors[neutral_indices,:], centroid)
		self.normalize_vectors()
		end = time.time();
		print(f'Neutralization done in {round(end-start, 3)}s, starting with neighbor thresholding and equidistancing...')

		start = time.time()
		for key_category in def_vectors:

			subcat_keys_within_this_cat = [x for t in list(bias_combinations[key_category].keys()) for x in t]
			vectors_for_equidistancing = {key: np.zeros(self.dimension) for key in subcat_keys_within_this_cat}
			center_vector = neutralize_vectors(def_vectors[key_category], centroid)
			equidistant_def_subcat_vectors_dict = make_vectors_equidistant(center_vector, vectors_for_equidistancing, r_cat)

			for i, key_subcategory in enumerate(subcat_keys_within_this_cat):
				values_okay, values_not_ok_idx, values_not_ok_idx_max = False, 0, 500
				while not values_okay:

					r_subcat = random.randint(1, 2**16-1)
					new_vectors = make_vectors_equidistant(equidistant_def_subcat_vectors_dict[key_subcategory], {word: self.get_value(word) for word in temp_sets[key_subcategory]}, r_subcat)

					found_artifact = False
					matrix_of_similarities = cs(np.float16(list(new_vectors.values())), np.float16(self.vectors))

					for ravel_idx, cs_value in enumerate(np.ravel(matrix_of_similarities)):
						if (cs_value>neighbors_threshold):
							found_artifact = True
							values_not_ok_idx+=1
							if(values_not_ok_idx % 100==0):
								print(f'{values_not_ok_idx} unsuccessful equidistancing iterations for {key_subcategory}')
							break

					if (found_artifact == True and values_not_ok_idx<values_not_ok_idx_max):
						values_okay = False
					else:
						for key in new_vectors:
							self.vectors[self.get_index_out_of_word(key)] = new_vectors[key]
						values_okay = True
						if (values_not_ok_idx>=values_not_ok_idx_max):
							print(f'Could not perform equidistancing below the requested threshold for {key_subcategory}')

			print(f'Finished with all {key_category} subcategories')

		self.normalize_vectors()
		self.vectors = np.array(self.vectors)
		end = time.time();
		print(f'Equidistancing done in {round(end-start, 3)}s.')
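The helpers used above (neutralize_vectors, make_vectors_equidistant, get_hardweat_sets, generate_centroid, scale_bias) are defined elsewhere in the module. As a hedged sketch only, the neutralization step is commonly a hard-debiasing style projection removal, which would look roughly like this:

import numpy as np

def neutralize_vectors(vectors, centroid):
    # Assumed behaviour (not the original implementation): remove each
    # vector's component along the bias centroid so the result is
    # orthogonal to it. Accepts a single vector or an array of row vectors.
    c = centroid / np.linalg.norm(centroid)
    v = np.atleast_2d(vectors)
    out = v - np.outer(v @ c, c)
    return out[0] if np.ndim(vectors) == 1 else out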
Exemple #60
0
def steps(mat, inst, opp):
    import cubeSfy as cS
    import cubeRot as cR
    from copy import deepcopy as cp

    def move(x, perm):
        if x / 3 == 0:
            cS.u(x % 3 + 1, perm)
        elif x / 3 == 1:
            cS.d(x % 3 + 1, perm)
        elif x / 3 == 2:
            cS.l(x % 3 + 1, perm)
        elif x / 3 == 3:
            cS.r(x % 3 + 1, perm)
        elif x / 3 == 4:
            cS.f(x % 3 + 1, perm)
        elif x / 3 == 5:
            cS.b(x % 3 + 1, perm)
        return perm

    def cond(perm):
        '''
        if not(perm[0][0][0][0]==perm[0][1][0][0] and \
               perm[0][1][0][0]==perm[1][0][0][0] and \
               perm[1][0][0][0]==perm[1][1][0][0]) :
            return False
        if not(perm[0][0][1][0]==perm[0][1][1][0] and \
               perm[0][1][1][0]==perm[1][0][1][0] and \
               perm[1][0][1][0]==perm[1][1][1][0]) :
            return False      
        if not(perm[0][0][0][1]==perm[0][0][1][1] and \
               perm[0][0][1][1]==perm[0][1][0][1] and \
               perm[0][1][0][1]==perm[0][1][1][1]) :
            return False
        if not(perm[1][0][0][1]==perm[1][0][1][1] and \
               perm[1][0][1][1]==perm[1][1][0][1] and \
               perm[1][1][0][1]==perm[1][1][1][1]) :
            return False
        if not(perm[0][0][0][2]==perm[1][0][0][2] and \
               perm[1][0][0][2]==perm[0][0][1][2] and \
               perm[0][0][1][2]==perm[1][0][1][2]) :
            return False
        if not(perm[0][1][0][2]==perm[1][1][0][2] and \
               perm[1][1][0][2]==perm[0][1][1][2] and \
               perm[0][1][1][2]==perm[1][1][1][2]) :
            return False
        

        '''
        plane = 0
        while plane < 3:
            x = 0
            while x < 2:
                y = 0
                while y < 2:
                    z = 0
                    while z < 2:
                        # check the candidate permutation `perm`, not the outer `mat`
                        if perm[x][y][z][plane] != perm[0][0][0][plane] and perm[
                                x][y][z][plane] != opp[perm[0][0][0][plane]]:
                            break
                        z = z + 1
                    if z < 2:
                        break
                    y = y + 1
                if y < 2:
                    break
                x = x + 1
            if x == 2:
                print True
                return True
            plane = plane + 1
        #print False, len(perms)
        return False
        '''
        if perm==[[[[0, 1, 2], [3, 2, 0]], [[5, 4, 3], [4, 2, 1]]], [[[3, 4, 2], [5, 4, 1]], [[3, 5, 0], [1, 5, 0]]]]:
            print "Yes"
            return True
        else:
            return False
        '''

    perms = [[-1, mat]]
    if cond(perms[0][1]):
        return inst

    for x in range(0, 18):
        perm = cp(perms[0][1])
        perms.append([x, move(x, perm)])
        perm = cp(perms[x + 1][1])
        if cond(perm):
            return inst

    moves = [
        "U", "U2", "U'", "D", "D2", "D'", "L", "L2", "L'", "R", "R2", "R'",
        "F", "F2", "F'", "B", "B2", "B'"
    ]
    while True:
        prev = (len(perms) - 7) / 12
        start = 6 * ((perms[prev][0] / 6) + 1)
        for x in range(start, start + 12):
            perm = cp(perms[prev][1])
            tag = x % 18
            perms.append([tag, move(tag, perm)])
            print len(perms)
            if cond(perm):
                prev = -1
                break
        if prev == -1:
            break

    prev = len(perms) - 1
    while prev != 0:
        inst.append(moves[perms[prev][0]])
        prev = (prev - 7) / 12
        perms = perms[:prev + 1]

    for x in range(0, len(inst) / 2):
        temp = inst[x]
        inst[x] = inst[len(inst) - x - 1]
        inst[len(inst) - x - 1] = temp

    return inst
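The reconstruction at the end of steps relies on how perms is laid out: the root state sits at index 0, its 18 single-move children at indices 1-18, and every node expanded in the while loop appends exactly 12 children (the 6 moves on the same axis as the parent's last move are skipped), so the children of the node at index p land at indices 12*p + 7 through 12*p + 18. The expressions (len(perms) - 7) / 12 and (prev - 7) / 12 are simply the inverse of that mapping. A small illustrative check of the invariant (not part of the original code):

def parent(i):
    # parent index of a node appended in the while loop (i >= 19)
    return (i - 7) // 12

def children(p):
    # indices at which the 12 children of node p are appended (p >= 1)
    return range(12 * p + 7, 12 * p + 19)

assert all(parent(i) == 1 for i in children(1))   # children 19..30 map back to node 1
assert all(parent(i) == 2 for i in children(2))   # children 31..42 map back to node 2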