Example #1
 def get_game_state(self):
     if any([cell == 2048 for cell in flatten(self.matrix)]):
         return 'win'
     # If there are empty fields, the game isn't over yet
     if any([cell == 0 for cell in flatten(self.matrix)]):
         return 'not over'
     for i in range(len(self.matrix) - 1):  # range intentionally reduced so we can check the cell to the right and below
         for j in range(len(self.matrix[0]) - 1):  # more elegant to use exceptions, but most likely this will be their solution
             if self.matrix[i][j] == self.matrix[i + 1][j] or self.matrix[i][j + 1] == self.matrix[i][j]:
                 return 'not over'
     for k in range(len(self.matrix) - 1):  # check left/right neighbours on the last row
         if self.matrix[len(self.matrix) - 1][k] == self.matrix[len(self.matrix) - 1][k + 1]:
             return 'not over'
     for j in range(len(self.matrix) - 1):  # check up/down neighbours on the last column
         if self.matrix[j][len(self.matrix) - 1] == self.matrix[j + 1][len(self.matrix) - 1]:
             return 'not over'
     return 'lose'
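A note on the recurring helper: flatten is not a Python builtin; every snippet on this page assumes a user-defined helper (or one imported from a local helpers module). A minimal sketch of the one-level flattening these examples appear to rely on (the PyTorch snippets in Examples #4 and #7 may instead expect tensor flattening along the lines of torch.flatten):

from itertools import chain


def flatten(list_of_lists):
    # Flatten one level of nesting: [[1, 2], [3]] -> [1, 2, 3]
    return list(chain.from_iterable(list_of_lists))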
Example #2
def home():
    summary, temp = get_weather()
    user = User.query.filter_by(id=current_user.id).first()
    friends_objects = user.friends
    notes_objects = user.notes

    # Collect every receivable/commitment across all friends
    receivables = flatten([friend.receivables for friend in friends_objects])
    commitments = flatten([friend.commitments for friend in friends_objects])

    money = bool(receivables or commitments)

    receivable_sum = round(
        sum(receivable.amount for receivable in receivables), 2)
    commitment_sum = round(
        sum(commitment.amount for commitment in commitments), 2)
    money_summary = round(abs(receivable_sum - commitment_sum), 2)

    sums = (receivable_sum, commitment_sum, money_summary)

    return render_template('home.html',
                           summary=summary,
                           temp=temp,
                           notes=notes_objects,
                           money=money,
                           receivables=receivables,
                           commitments=commitments,
                           sums=sums)
Example #3
def sound_similarity_phrase(phrase1, phrase2):
    '''
    Scores how similarly two phrases sound: 0 if the syllable counts differ,
    otherwise the mean of the per-syllable similarity coefficients.
    '''
    phrase1_flat = flatten(split_in_syllables(phrase1))
    phrase2_flat = flatten(split_in_syllables(phrase2))
    if len(phrase1_flat) != len(phrase2_flat):
        return 0
    else:
        syllable_similarity_coefs = []
        for syl1, syl2 in zip(phrase1_flat, phrase2_flat):
            syllable_similarity_coefs.append(
                sounds_similar_syllable(syl1, syl2))
        #print(syllable_similarity_coefs)
        return sum(syllable_similarity_coefs) / len(syllable_similarity_coefs)


#print(sound_similarity_phrase('те дисам ко на писта', 'ве мислам гомна глиста'))
#print(sound_similarity_phrase('вештина ја најде', 'грешлива на тајмер'))
#print(sound_similarity_phrase('вештина ја најде', 'њук е многу добар'))
#print(sound_similarity_phrase('бараш вештина ја најде', 'кур на главата е рапер'))
#print(sound_similarity_phrase('некој друг е виновен', 'ди нов совет мировен'))
#print(sound_similarity_phrase('пеење', 'пењата'))

#print(split_in_syllables('пеење'))
# print(split_in_syllables('вештина ја најде'))
# print(split_in_syllables('грешлива на тајмер'))
Example #4
def _eval(model: Model, x: t.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]) -> t.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Evaluates model
    :param model: model to eval
    :param x: tuple [input, YMin, YMax]
    :return: loss, predicts and true labels
    """
    X, Y = _unpack_data(x)
    with torch.no_grad():
        pred = model(X.unsqueeze(-1).unsqueeze(0).float())
        loss = nn.functional.cross_entropy(flatten(pred), Y.flatten(),
                                           weight=torch.Tensor([2e-1, 1, 1]).to(pred.device))
    return loss, flatten(pred).argmax(dim=-1), Y.flatten()
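For context on the flatten calls in this snippet: nn.functional.cross_entropy expects (N, C) logits against (N,) integer targets, which is why both the predictions and the labels are flattened before computing the loss. A minimal shape check with the same three-class weights:

import torch
import torch.nn as nn

logits = torch.randn(7, 3)           # (N, C): 7 samples, 3 classes
targets = torch.randint(0, 3, (7,))  # (N,): integer class labels
loss = nn.functional.cross_entropy(
    logits, targets, weight=torch.tensor([2e-1, 1.0, 1.0]))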
Example #5
    def get_state(self, **kwargs):
        """ We request positions, orientations, speeds, and emissions
        of observable vehicles.

        (See parent class for more information)"""
        # We select beta observable vehicles and exclude inflows
        ids = self.get_observable_veh_ids()

        vel = [self.k.vehicle.get_speed(veh_id) for veh_id in ids]
        orientation = [
            self.k.vehicle.get_orientation(veh_id) for veh_id in ids
        ]
        emission = [
            self.k.vehicle.kernel_api.vehicle.getCO2Emission(veh_id)
            for veh_id in ids
        ]

        tl = np.concatenate([
            self.encode_tl_state(tl_id)
            for tl_id in self.get_controlled_tl_ids()
        ])

        # We pad the state in case a vehicle is being respawned to prevent
        # dimension related exceptions
        vel = pad_list(vel, self.model_params["beta"], 0.)
        orientation = pad_list(orientation, self.model_params["beta"],
                               [0., 0., 0.])
        emission = pad_list(emission, self.model_params["beta"], 0.)

        return np.concatenate((flatten(orientation), vel, emission, tl))
Example #6
def cleanSources( uncleanStore = settings.UNCLEAN_STORE, numWorkers = settings.MAX_WORKERS ):
    """Cleans all files in the unclean directory, using numWorkers worker processes.

    :param uncleanStore: Directory holding the unclean data. Defaults to settings.UNCLEAN_STORE
    :param numWorkers: Number of worker processes to allocate. Defaults to settings.MAX_WORKERS

    Call this function directly to clean data. For example:

    >>> cleanSources( settings.UNCLEAN_STORE )
    Cleaning data in <C:\AFPunclean> with <8> workers.

    """
    pool = mp.Pool( numWorkers )
    print "Cleaning data in <%s> with <%d> workers." % ( uncleanStore, numWorkers )
    
    def clean( source ):
        try:
            filer = filers.BatchFiler( schema.getSchema( source ) )
        except Exception as e:
            return [e]
        sourceDir = os.path.join( settings.UNCLEAN_STORE, source )
        uncleaned = ( ( filer, sourceDir, uncleaned ) for uncleaned in os.listdir( sourceDir ) )
        return pool.map( _cleanFile, uncleaned )
    results = helpers.flatten( [ clean( source ) for source in os.listdir( uncleanStore ) ] )
    added = [ result for result in results if result.added ]
    notAdded = [ result for result in results if not result.added ]
    return { "Added" : added, "Unable To Add" : notAdded }
Example #7
def _train_step(model: Model, opt: optim.Optimizer,
                x: t.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]) -> t.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Performs one step of training
    :param model: model to train
    :param opt: optimizer
    :param x: tuple [input, YMin, YMax]
    :return: loss, predictions, and true labels
    """
    X, Y = _unpack_data(x)
    pred = model(X.unsqueeze(-1).float())
    loss = nn.functional.cross_entropy(flatten(pred), Y.flatten(),
                                       weight=torch.Tensor([2e-1, 1, 1]).to(pred.device))
    loss.backward()
    opt.step()
    opt.zero_grad()
    return loss, flatten(pred).argmax(dim=-1), Y.flatten()
Example #8
 def add_random_digit(self):
     if not any([cell == 0 for cell in flatten(self.matrix)]):
         # No place available to add
         return
     a = randint(0, self.y_dim - 1)
     b = randint(0, self.x_dim - 1)
     while self.matrix[a][b] != 0:
         a = randint(0, self.y_dim - 1)
         b = randint(0, self.x_dim - 1)
     digit = choice([2] * 9 + [4])  # 90% chance of a 2, 10% chance of a 4
     self.matrix[a][b] = digit
Example #9
 def get(self, paths, flat=True):
     try:
         output = {}
         with open(self.filename, 'rb') as f:
             data = yaml.safe_load(f.read())
         for path in paths:
             if path.strip('/'):
                 output = merge(output, search(data, path))
             else:
                 return flatten(data) if flat else data
         return flatten(output) if flat else output
     except IOError as e:
         print(e, file=sys.stderr)
         if e.errno == 2:
             print("Please run init before doing plan!")
         sys.exit(1)
     except TypeError as e:
         if 'object is not iterable' in e.args[0]:
             return dict()
         raise
Example #10
 def add_random_digit(self):
     if not any([cell == 0 for cell in flatten(self.matrix)]):
         # No place available to add
         return
     a = randint(0, self.y_dim - 1)
     b = randint(0, self.x_dim - 1)
     while self.matrix[a][b] != 0:
         a = randint(0, self.y_dim - 1)
         b = randint(0, self.x_dim - 1)
     digit = 2 if random() < 0.9 else 4
     self.matrix[a][b] = digit
Example #11
def get_logic_usage(ltext):

    results = {}

    logic_lines = ltext.strip().split("\n")
    results['LINES'] = len(logic_lines)

    # lines without comments
    lines_less_comments = [removeComment(l) for l in logic_lines]

    # lines without blank or comment lines
    lines_nb_nc = remove_empty(lines_less_comments)
    results['LINES_UNCOMMENTED'] = len(lines_nb_nc)

    tokenised_lines = [l.split(" ") for l in lines_less_comments]  # currently unused

    raw_line_elements = []
    logic_elements = []
    residual_elements = []

    for line in logic_lines:
        [int_eqn_elements, int_logic_elements,
         int_residual_elements] = getLineComponents(line, keepNumbers=False)

        raw_line_elements.append(int_eqn_elements)
        logic_elements.append(int_logic_elements)
        residual_elements.append(int_residual_elements)

    # We are only interested in totals, so flatten the array and take only unique values
    logic_elements = unique(flatten(logic_elements))
    residual_elements = unique(flatten(residual_elements))

    logic_elements.sort()
    residual_elements.sort()

    for key, value in RDBOperatorsConst.TYPES.items():
        search_string = getVariableRegex(value[0])
        count = comp_count(logic_elements, search_string)
        results[key] = str(count)

    return [results, logic_elements, residual_elements]
Example #12
def get_logic_usage(ltext):

    results = {}

    logic_lines = ltext.strip().split("\n")
    results['LINES'] = len(logic_lines)

    # lines without comments
    lines_less_comments = [removeComment(l) for l in logic_lines]

    # lines without blank or comment lines
    lines_nb_nc = remove_empty(lines_less_comments)
    results['LINES_UNCOMMENTED'] = len(lines_nb_nc)

    tokenised_lines = [l.split(" ") for l in lines_less_comments]  # currently unused

    raw_line_elements = []
    logic_elements = []
    residual_elements = []

    for line in logic_lines:
        [int_eqn_elements, int_logic_elements,
         int_residual_elements] = getLineComponents(line, keepNumbers=False)

        raw_line_elements.append(int_eqn_elements)
        logic_elements.append(int_logic_elements)
        residual_elements.append(int_residual_elements)

    # We are only interested in totals, so flatten the array and take only unique values
    logic_elements = unique(flatten(logic_elements))
    residual_elements = unique(flatten(residual_elements))

    logic_elements.sort()
    residual_elements.sort()

    for key, value in RDBOperatorsConst.TYPES.items():
        search_string = getVariableRegex(value[0])
        count = comp_count(logic_elements, search_string)
        results[key] = str(count)

    return [results, logic_elements, residual_elements]
Example #13
    def get(self, paths=['/'], flat=True):
        paginator = self.ssm.get_paginator('get_parameters_by_path')
        output = {}
        for path in paths:
            for page in paginator.paginate(Path=path,
                                           Recursive=True,
                                           WithDecryption=True):
                for param in page['Parameters']:
                    add(obj=output,
                        path=param['Name'],
                        value=self._read_param(param['Value'], param['Type']))

        return flatten(output) if flat else output
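The add helper this snippet calls is not shown on this page; judging by its keyword arguments, it presumably writes a value into a nested dict along the /-separated parameter path. A hypothetical stand-in:

def add(obj, path, value):
    # Hypothetical sketch: store value under the nested keys of '/a/b/c'.
    keys = path.strip('/').split('/')
    for key in keys[:-1]:
        obj = obj.setdefault(key, {})
    obj[keys[-1]] = value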
Example #14
 def draw_tool_position(self, image, final=False, blink_phase=True):
     pc = copy(self.polygon_coords)
     if not final:
         pc.append(copy(self.board.coords))
     self.c.load_image(image)
     color = self.c.default_color if blink_phase else self.c.background_color
     if len(pc) > 2:
         self.c.polygon(pc, outline=color)
     elif len(pc) == 2:
         self.c.line(flatten(pc), fill=color)
     elif len(pc) == 1:
         self.c.point(pc[0], fill=color)
     self.c.point(self.board.coords, fill=color)
     return self.c.get_image()
Example #15
 def get_game_state(self):
     if any([cell == 2048 for cell in flatten(self.matrix)]) and not self.has_won:
         # remember that user has already got a 2048 tile, to avoid displaying 'you won' each time.
         self.has_won = True
         return 'win'
     # If there are empty fields, the game isn't over yet
     if any([cell == 0 for cell in flatten(self.matrix)]):
         return 'not over'
     for i in range(len(self.matrix) - 1):  # intentionally reduced to check the row on the right and below
         for j in range(len(self.matrix[0]) - 1):  # more elegant to use exceptions, but most likely this will be their solution
             if self.matrix[i][j] == self.matrix[i + 1][j] or self.matrix[i][j + 1] == self.matrix[i][j]:
                 return 'not over'
     for k in range(len(self.matrix) - 1):  # to check the left/right entries on the last row
         if self.matrix[len(self.matrix) - 1][k] == self.matrix[len(self.matrix) - 1][k + 1]:
             return 'not over'
     for j in range(len(self.matrix) - 1):  # check up/down entries on last column
         if self.matrix[j][len(self.matrix) - 1] == self.matrix[j + 1][len(self.matrix) - 1]:
             return 'not over'
     if self.has_won:
         # this happens if the field is full but the user got a 2048 tile once.
         return 'win'
     return 'lose'
Example #16
    def consolidate_attribute(self, attribute_name):
        # type: (str) -> None
        attr_value = getattr(self, attribute_name)
        attr_value = flatten(attr_value)
        attr_value = list(
            set([i.strip() for i in attr_value
                 if isinstance(i, basestring)]))  # removes exact duplicates

        attr_value[:] = [
            x for x in attr_value if
            not self._is_contained_in_other_element_of_the_list(x, attr_value)
        ]

        setattr(self, attribute_name, list(set(attr_value)))
Example #17
    def orientations(self, ids):
        """Encodes vehicle orientation into a vector representation.
        The orientation for each vehicle is a 3-vector encoding the
        cartesian x and y coordinates along with an angle.

        Parameters
        ----------
        ids: List<String>
            List of vehicle ids to encode in orientation in vector.

        Returns
        -------
        encoded_state: List<Float> of length `3 * len(ids)`
             Encoded orientations in same order as `ids`."""
        odict = self._get_odict([0., 0., 0.])
        for veh_id in ids:
            odict[veh_id] = self.k.vehicle.get_orientation(veh_id)
        return flatten(self._odict_to_list(odict))
Example #18
def process_url(tokens, outputs, lang):
    """
    The function is used to process the spoken form of every URL in an example

    Args:
        tokens: The tokens of the written form
        outputs: The expected outputs for the spoken form
        lang: Selected language.
    Return:
        outputs: The outputs for the spoken form with preprocessed URLs.
    """
    if lang == constants.ENGLISH:
        for i in range(len(tokens)):
            t, o = tokens[i], outputs[i]
            if o != constants.SIL_WORD and '_letter' in o:
                o_tokens = o.split(' ')
                all_spans, cur_span = [], []
                for j in range(len(o_tokens)):
                    if len(o_tokens[j]) == 0:
                        continue
                    if o_tokens[j] == '_letter':
                        all_spans.append(cur_span)
                        all_spans.append([' '])
                        cur_span = []
                    else:
                        o_tokens[j] = o_tokens[j].replace('_letter', '')
                        cur_span.append(o_tokens[j])
                if len(cur_span) > 0:
                    all_spans.append(cur_span)
                o_tokens = flatten(all_spans)

                o = ''
                for o_token in o_tokens:
                    if len(o_token) > 1:
                        o += ' ' + o_token + ' '
                    else:
                        o += o_token
                o = o.strip()
                o_tokens = wordninja.split(o)
                o = ' '.join(o_tokens)

                outputs[i] = o

    return outputs
Example #19
    def preprocess_text(self, text):
        use_own_stopwords = False
        text = helper.clean(text)  # Clean the text: remove double spaces, special chars, and copyright marks.
        doc = self.nlp(text)

        sentences = []
        for sentence in doc.sents:  # Split text into sentences
            chunks = []

            for chunk in sentence.noun_chunks:  # Get noun_chunks out of sentence
                if use_own_stopwords:  # Remove stopwords
                    tokens = chunk.text.lower().split(' ')
                    tokens = list(
                        filter(lambda token: token not in stopwords, tokens))
                else:
                    # chunk = self.nlp(chunk.text.lower())
                    # tokens = [token.text for token in chunk if not token.is_stop]
                    tokens = chunk.text.lower().split(' ')  # This method is much faster for checking stopwords
                    tokens = list(
                        filter(lambda token: not self.nlp.vocab[token].is_stop,
                               tokens))

                tokens = list(filter(lambda token: token != '', tokens))
                tokens = [lemmatize_word(token) for token in tokens]  # Lemmatize

                chunk = ' '.join(tokens)
                chunk = chunk.lower()  # lower all the text

                if chunk != '':
                    chunks.append(chunk)

            if len(chunks) > 0:  # Only append non empty tokens
                sentences.append(chunks)

        return helper.flatten(sentences)  # Merge the sentences' words into one document array
Example #20
def build_db(dt):
    # Load the base information.
    viewdict = cache('viewdict', socrata.viewdict)

    dt.drop('dataset', if_exists = True)
    dt.create_table(list(viewdict.values())[0], 'dataset')
    dt.create_index(['id'], 'dataset', if_not_exists = True, unique = True)

    # Add the join references (shared columns).
    columndict = cache('columndict', socrata.columndict)
    for column, new_viewids in columndict.items():
        for viewid in new_viewids:
            old_viewids = set(viewdict[viewid].get('joinable', []))
            viewdict[viewid]['joinable'] = list(old_viewids.union(new_viewids))

    # Add the union references (shared schema)
    uniondict = cache('uniondict', socrata.uniondict)
    for schema, viewids in uniondict.items():
        for viewid in viewids:
            viewdict[viewid]['schema'] = list(schema)
            viewdict[viewid]['unionable'] = list(viewids)

    # uniondict_broad = socrata.uniondict_broad()
    # generators = cache('generators', write.build_generators)


    # Save to the database.
    for view in viewdict.values():
        # There's a lot of interesting stuff in these fields,
        # but they're big or weakly structured.
        # Remove these because they're big
        for key in ['columns', 'metadata', 'displayFormat', 'viewFilters', 'query']:
            if key in view:
                del view[key]

        # Flatten
        dt.insert(flatten(view), 'dataset')
    dt.commit()

    return dt
Example #21
def cbc_decrypt(ct, cipher, iv):
    """
    
    Decrypt ciphertext bytes in CBC mode

    Arguments:
    ct     -- Ciphertext bytes to decrypt
    cipher -- Cipher object used to decrypt (must expose decrypt(ciphertext)
              method and block_size member)
    iv     -- Initialization vector

    Returns:
              Bytes decrypted in CBC mode
    
    """

    pt = []
    ct = [iv] + chunks(ct, cipher.block_size)
    for i in range(1, len(ct)):
        pt += [fixed_xor(ct[i-1], cipher.decrypt(ct[i]))]
    return flatten(pt)
Example #22
def cbc_encrypt(pt, cipher, iv):
    """

    Encrypt plaintext bytes in CBC mode

    Arguments:
    pt     -- Plaintext bytes to encrypt
    cipher -- cipher object used to encrypt (must expose encrypt(ciphertext)
              method and block_size member)
    iv     -- Initialization vector

    Returns:
              Bytes encrypted in CBC mode
    
    """

    ct = [iv]
    pt = chunks(pt, cipher.block_size)
    for i in range(len(pt)):
        ct += [cipher.encrypt(bytes(fixed_xor(pt[i], ct[i])))]
    return flatten(ct[1:])
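Both CBC snippets above lean on helpers that are not shown here: chunks (block splitting) and fixed_xor (element-wise XOR), plus the flatten helper sketched at the top of this page. Plausible stand-ins, assuming bytes are handled as lists of ints:

def chunks(data, size):
    # Split data into consecutive blocks of `size` elements.
    return [list(data[i:i + size]) for i in range(0, len(data), size)]


def fixed_xor(a, b):
    # XOR two equal-length byte sequences element-wise.
    return [x ^ y for x, y in zip(a, b)]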
Example #23
import helpers

data = open('data/day21.txt', 'r').read().splitlines()

allergens = list()
ingredients = list()

for d in data:
    ingredients.append([x.strip() for x in d[:d.find('(') - 1].split()])
    allergens.append(
        [x.strip() for x in d[d.find('contains ') + 9:-1].split(',')])

probables = dict()
for a in helpers.flatten(allergens):
    p = list()
    for c, x in enumerate(allergens):
        if a in x:
            p.append(ingredients[c])
    if p:
        probables[a] = set(p[0])
        for s in p[1:]:
            probables[a].intersection_update(s)

while True:
    unique = list()
    for k in probables.keys():
        if len(probables[k]) == 1:
            unique.extend(list(probables[k]))
    if len(unique) == len(probables.keys()):
        break
    for k, v in probables.items():
        # likely continuation (reconstructed): remove already-resolved
        # ingredients from the remaining candidate sets
        if len(v) > 1:
            v.difference_update(unique)
Example #24
def createSimpleCleanSamplesDf(df,
                               instForSignal=4,
                               n_samplesSignal=20,
                               n_samplesNoise=20,
                               sampleFreq='10L',
                               truthLookAhead=10,
                               createDfTemp=False,
                               resampleType='last'):
    ''' 
    Only use data from instForSignal for features. Only 2 features, so results are easy to view. The features are pcPercent 10ms and 20ms in the past.
    
    Sampled at some frequency (ie 10ms) in the past. 
    
    Inputs
       instForSignal:   index of the instrument we are interested in
       n_samplesSignal: samples where truth is -1 or 1
       n_samplesNoise:  samples where truth is 0
       sampleFreq:      sample data on fixed grid. Units are below.
       truthLookAhead:  how many sampleFreq points to look for signal. ie if sampleFreq=10L and truthLookAhead = 10 we look 100ms ahead for a change in price.
       createDfTemp:    create debug output (slow)
       resampleType:    last, mean, sum (might capture many updates)
    
    Outputs 
        dataFeatures: [n_samples, n_features].    features = pcPercent 10 and 20 ms in the past before truth event. 
        truthData:    [n_samples].   0, 1, -1 for no change, change up, change down
        dfTemp:       subset of df we used to create features. For debugging.
    
    
    Assume if I miss an instrument when resampling I will see the change eventually.
    
    H   hourly frequency
    T   minutely frequency
    S   secondly frequency
    L   milliseconds
    U   microseconds
    
    TODO: 
         Check for only one change in signal area to avoid flickering?
         Should feature be mean or min/max rather than ffill?
        
        
    '''

    idxStart = []
    idxSeen = []
    dataFeatures = []
    truthData = []
    numNoJump = 0
    numJump = 0

    dfTemp = pd.DataFrame()
    dfSample = df[df['instNum'] == instForSignal]  # only look at data for instForSignal
    dfSample = dfSample.resample(sampleFreq).last().ffill()

    dfSampleForOutput = pd.DataFrame()
    if resampleType == 'last':
        dfSampleForOutput = dfSample.resample(sampleFreq).last().ffill()
    elif resampleType == 'mean':
        dfSampleForOutput = dfSample.resample(sampleFreq).mean().ffill()
    elif resampleType == 'sum':
        dfSampleForOutput = dfSample.resample(sampleFreq).sum().ffill()

    np.random.seed(1001)
    indexRandom = np.random.randint(dfSample.shape[0] - truthLookAhead * 10,
                                    size=500000)  #create a lot of candidates

    # Check for each indexRandom that we try to find a non-overlapping n_samplesSignal and n_samplesNoise

    npts = 2  # number of time steps to look for features
    totNpts = npts + truthLookAhead  # number of time steps to look for signal

    for i in range(indexRandom.shape[0]):
        idx = indexRandom[i]

        dfFirstSegmentInst = dfSample.iloc[idx:idx + npts][['pB', 'pA']]
        dfSecondSegmentInst = dfSample.iloc[idx + npts:idx +
                                            totNpts][['pB', 'pA']]

        #Pdb().set_trace()

        pBStart = dfFirstSegmentInst.iloc[0]['pB']
        pAStart = dfFirstSegmentInst.iloc[0]['pA']

        noChangeFirst = True
        if np.sum(np.diff(dfFirstSegmentInst['pB'])) > 1.e-8 or np.sum(
                np.diff(dfFirstSegmentInst['pA'])) > 1.e-8:
            noChangeFirst = False

        noChangeSecond = True
        if np.sum(np.diff(dfSecondSegmentInst['pB'])) > 1.e-8 or np.sum(
                np.diff(dfSecondSegmentInst['pA'])) > 1.e-8:
            noChangeSecond = False

        # Noise
        if (idxSeen.count(idx) == 0 and idxSeen.count(idx + totNpts - 1) == 0
                and  # no overlapping segments
                numNoJump < n_samplesNoise and
                noChangeFirst and noChangeSecond):

            numNoJump = numNoJump + 1
            features = flatten(
                dfSampleForOutput.iloc[idx:idx + npts][['pcPercent']].values.tolist())
            dataFeatures.append(features)
            truthData.append(0)
            #Pdb().set_trace()
            idxStart.append(idx)
            for z in range(idx, idx + totNpts):
                idxSeen.append(z)
            if createDfTemp:
                dfTemp = dfTemp.append(dfSample.iloc[idx:idx + totNpts])

        # Signal
        if (idxSeen.count(idx) == 0 and idxSeen.count(idx + totNpts - 1) == 0
                and  # no overlapping segments
                numJump < n_samplesSignal and noChangeFirst and
                # parentheses keep the guards above applying to both move directions
                ((pBStart < dfSecondSegmentInst['pB'].max() - 1.e-8
                  and pAStart < dfSecondSegmentInst['pA'].max() - 1.e-8)
                 or (pBStart > dfSecondSegmentInst['pB'].min() + 1.e-8
                     and pAStart > dfSecondSegmentInst['pA'].min() + 1.e-8))):

            numJump = numJump + 1
            features = flatten(
                dfSampleForOutput.iloc[idx:idx + npts][['pcPercent']].values.tolist())
            dataFeatures.append(features)
            if pBStart < dfSecondSegmentInst['pB'].max() - 1.e-8:
                truthData.append(1)
            elif pBStart > dfSecondSegmentInst['pB'].min() + 1.e-8:  # elif ensures exactly one label per feature row
                truthData.append(-1)
            idxStart.append(idx)
            for z in range(idx, idx + npts):
                idxSeen.append(z)
            if createDfTemp:
                dfTemp = dfTemp.append(dfSample.iloc[idx:idx + totNpts])
            print('numJump = {} nonJump = {}\n'.format(numJump, numNoJump))

        if numJump >= n_samplesSignal and numNoJump >= n_samplesNoise:
            break

        if (i % 10000 == 0):
            print('i: {}\n'.format(i))

    return dataFeatures, truthData, dfTemp
Example #25
    def do_firstboot(self):
        firstboot_actions = cm.am.get_firstboot_actions()
        # Skipping actions that shouldn't be run in an emulator
        if is_emulator():
            firstboot_action_names = [
                name for name, action in firstboot_actions.items()
                if not action.not_on_emulator
            ]
        else:
            firstboot_action_names = list(firstboot_actions.keys())
        firstboot_file, is_new_file = self.get_firstboot_file()
        if firstboot_file is None:
            logger.error(
                "Can't read/create a firstboot file - no sense in the firstboot application continuing"
            )
            return
        completed_action_names = []
        failed_action_names = []
        skipped_action_names = []
        if not is_new_file:
            completed_action_names = []
            completed_actions = self.get_completed_actions_from_file(
                firstboot_file)
            for action in completed_actions:
                if isinstance(action, basestring):
                    completed_action_names.append(action)
                elif isinstance(action, dict):
                    action_name = action["action"]
                    action_result = action["status"]
                    if action_result == "success":
                        # If action has been recorded as failed/skipped before and later was marked as successful
                        # we don't need to count the fails/skips in
                        while action_name in skipped_action_names:
                            skipped_action_names.remove(action_name)
                        while action_name in failed_action_names:
                            failed_action_names.remove(action_name)
                        completed_action_names.append(action_name)
                    elif action_result == "fail":
                        # if action failed before, it being skipped before/after is irrelevant
                        failed_action_names.append(action_name)
                        while action_name in skipped_action_names:
                            skipped_action_names.remove(action_name)
                    elif action_result == "skip":
                        if action_name not in failed_action_names:
                            skipped_action_names.append(action_name)
        non_completed_action_names = [
            n for n in firstboot_action_names if
            n not in completed_action_names and n not in skipped_action_names
        ]
        # print(non_completed_action_names, skipped_action_names, failed_action_names, completed_action_names)
        if non_completed_action_names:
            if is_new_file or (not completed_action_names
                               and not skipped_action_names
                               and not failed_action_names):
                # first boot, no info whatsoever yet
                message = "Let's go through first boot setup!"
            elif not completed_action_names and not failed_action_names:  # Not the first boot - some actions have been skipped before
                message = "New setup actions for your ZP found!"
            elif failed_action_names and set(completed_action_names) == set(
                    failed_action_names
            ):  # Some actions have not been successfully completed
                message = "Want to retry failed first boot actions?"
            else:  # ought to make a truth table, I guess
                message = "New setup actions for your ZP found!"
            if not self.context.request_exclusive():
                logger.error("Can't get an exclusive context switch, exiting")
                return
            choice = DialogBox('yn',
                               self.i,
                               self.o,
                               message=message,
                               name="Firstboot wizard setup menu").activate()
            if not choice:
                self.context.rescind_exclusive()
                return
            else:
                # User confirmed that they want to go through with the firstboot wizard
                # Let's sort the actions and resolve their dependencies
                # For that, we need some storage variables.
                # Here, we store actions by their fullname, sorted in order to
                # resolve the dependency problems
                sorted_actions = OrderedDict()
                # Here, we store lists of actions that depend on some other action,
                # sorted by the execution order (after resolving the dependencies)
                action_dependants = {}
                # Here, we store action fullnames (provider+separator+name)
                # by their short names (just action name, no 'provider' appended)
                # because short names are used in dependencies.
                action_fullname_by_name = {}

                def get_prov_and_name(action_fullname):
                    return action_fullname.split(cm.am.action_name_delimiter,
                                                 1)

                # First, creating a lookup table for looking up dependencies
                for action_fullname in firstboot_action_names:
                    _, action_name = get_prov_and_name(action_fullname)
                    action_fullname_by_name[action_name] = action_fullname
                # Then, compiling the list of actions that depend on other action
                for action_fullname in non_completed_action_names:
                    _, action_name = get_prov_and_name(action_fullname)
                    action = firstboot_actions[action_fullname]
                    if action.depends:
                        has_unresolved_dependencies = False
                        for dependency in action.depends:
                            dep_fullname = action_fullname_by_name.get(
                                dependency, None)
                            if dep_fullname is None:
                                logger.error(
                                    "Dependency {} for action {} is not found!"
                                    .format(dependency, action_name))
                                continue
                            if dep_fullname in non_completed_action_names:
                                has_unresolved_dependencies = True
                                # dependency hasn't been completed yet
                                if dep_fullname in action_dependants:
                                    action_dependants[dep_fullname].append(
                                        action_fullname)
                                else:
                                    action_dependants[dep_fullname] = [
                                        action_fullname
                                    ]
                            else:
                                logger.info(
                                    "Dependency {} (for action {}) is already completed!"
                                    .format(dep_fullname, action_name))
                        if not has_unresolved_dependencies:
                            # No non-completed dependencies have been found
                            # so, we can just add the action to the list
                            sorted_actions[action_fullname] = action
                    else:
                        # Action doesn't depend on anything, just adding it to the list
                        sorted_actions[action_fullname] = action
                # This code untangles an arbitrarily long chain of dependencies
                # except, well, circular dependencies
                actions_involved_in_dependencies = []
                if action_dependants:
                    original_dependants = copy(action_dependants)
                    logger.info(
                        "Resolving dependencies: {}".format(action_dependants))
                    while action_dependants:
                        all_dependency_actions = action_dependants.keys()
                        all_dependent_actions = flatten(
                            action_dependants.values())
                        independent_dependencies = [
                            n for n in all_dependency_actions
                            if n not in all_dependent_actions
                        ]
                        if not independent_dependencies:
                            logger.error(
                                "No independent dependencies found while resolving dependencies: {} (original: {})!"
                                .format(action_dependants,
                                        original_dependants))
                            return
                        for action_fullname in independent_dependencies:
                            if action_fullname not in sorted_actions:
                                sorted_actions[
                                    action_fullname] = firstboot_actions[
                                        action_fullname]
                                actions_involved_in_dependencies.append(
                                    action_fullname)
                            action_dependants.pop(action_fullname)
                    # The while() has run its course and the dependencies have been linearized
                    all_dependent_actions = flatten(
                        original_dependants.values())
                    all_unadded_dependent_actions = [n for n in all_dependent_actions \
                      if n not in sorted_actions]
                    for action_fullname in all_unadded_dependent_actions:
                        sorted_actions[action_fullname] = firstboot_actions[
                            action_fullname]
                        actions_involved_in_dependencies.append(
                            action_fullname)
                    logger.info("Dependencies resolved!")
                # Sorting actions for consistent firstboot experience
                sorted_actions = self.sort_actions_by_ordering(
                    sorted_actions, actions_involved_in_dependencies)
                # Now, executing actions one-by-one
                failed_actions = []
                log_completed_action_has_failed = False
                for action_fullname, action in sorted_actions.items():
                    if action.depends:
                        if any([d in failed_actions for d in action.depends]):
                            logger.error(
                                "Not executing action {} because some of its dependencies ({}) are among failed dependencies: {}"
                                .format(action_fullname, action.depends,
                                        failed_actions))
                            continue
                    action_provider, action_name = get_prov_and_name(
                        action_fullname)
                    if action.will_context_switch:
                        self.context.request_switch(action_provider,
                                                    start_thread=False)
                    try:
                        result = action.func()
                    except:
                        logger.exception("Action {} failed to execute!".format(
                            action_fullname))
                        failed_actions.append(action_name)
                    else:
                        if result is False:  # Action failed internally
                            failed_actions.append(action_name)
                        status = {
                            False: "fail",
                            True: "success",
                            None: "skip"
                        }.get(result, "success")
                        action_dict = {
                            "action": action_fullname,
                            "status": status
                        }
                        action_result = json.dumps(action_dict)
                        try:
                            with open(firstboot_file, 'a') as f:
                                f.write(action_result + '\n')
                        except:
                            # Avoid cluttering the logs - logger.exception writes the entire traceback into logs
                            # while logger.error just writes the error message
                            if not log_completed_action_has_failed:
                                logger.exception(
                                    "Can't write action {} into firstboot logfile {}!"
                                    .format(action_fullname, firstboot_file))
                                log_completed_action_has_failed = True
                            else:
                                logger.error(
                                    "Can't write action {} into firstboot logfile {}!"
                                    .format(action_fullname, firstboot_file))
                    self.context.request_switch()
Example #26
from functools import reduce
from operator import mul

from helpers import flatten  # assumed import - same helpers module as the day 21 snippet

raw = open('data/day20.txt', 'r').read().split('\n\n')


def to_int(line):
    return [
        int(line.replace('.', '0').replace('#', '1'), 2),
        int(line[::-1].replace('.', '0').replace('#', '1'), 2)
    ]


tiles = dict()
for r in raw:
    lines = r.split('\n')
    k = int(lines[0][5:9])
    b = list()
    b.extend(to_int(lines[1]))
    b.extend(to_int(lines[len(lines) - 1]))
    b.extend(to_int(''.join(x[0] for x in lines[1:])))
    b.extend(to_int(''.join(x[len(x) - 1] for x in lines[1:])))
    tiles[k] = b

flat_borders = list(flatten(tiles.values()))
outers = [x for x in flat_borders if flat_borders.count(x) == 1]

corners = [
    k for k, v in tiles.items() if sum(1 for x in v if x in outers) == 4
]

print("Puzzle 20.1: ", reduce(mul, corners))
Example #27
 def create_parents(Adj):
     Product = {node: [] for node in flatten(Adj.values())}
     for parent, children in Adj.items():
         for child in children:
             Product[child].append(parent)  # append, so the empty-list default stays meaningful
     return Product
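A quick usage sketch (hypothetical adjacency dict; with the append fix above, every child maps to the list of its parents, and root nodes never appear as keys of the result):

Adj = {'a': ['b', 'c'], 'b': ['c']}
print(create_parents(Adj))  # {'b': ['a'], 'c': ['a', 'b']}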
Example #28
def getEqnArrayOfArraysResidual(arr, removeEmpty=True):
    # TODO: I can remove this?
    # Given an array of arrays ([lines] and [elements in equations])
    # Return the residual elements after removing operators and SELogic items
    return getEqnResidual(flatten(arr), removeEmpty)
Example #29
 def get_unique(list_of_lists):
     return list(set(flatten(list_of_lists)))
Example #30
def getEqnArrayOfArraysResidual(arr, removeEmpty=True):
    # TODO: I can remove this?
    # Given an array of arrays ([lines] and [elements in equations])
    # Return the residual elements after removing operators and SELogic items
    return getEqnResidual(flatten(arr), removeEmpty)
Example #31
def get_grammar(sentence):
    """
    Use Stanford CoreNLP to extract grammar from Stanford NLP Java utility
    Return 
       root topic (lower-case string - "Core"),
       subj (list with main subj first, compounds after) 
       obj (list with main obj first, compounds after)
    """
    os.environ['JAVAHOME'] = JAVA_HOME  # Set this to where the JDK is
    dependency_parser = StanfordDependencyParser(path_to_jar=STANFORD_NLP, path_to_models_jar=STANFORD_MODELS)
    
    regexpSubj = re.compile(r'subj')
    regexpObj = re.compile(r'obj')
    regexpMod = re.compile(r'mod')
    regexpNouns = re.compile("^N.*|^PR.*")
    
    sentence = sentence.lower()

    #return grammar Compound Modifiers for given word
    def get_compounds(triples, word):
        compounds = []
        for t in triples:
            if t[0][0] == word:
                if t[2][1] not in ["CC", "DT", "EX", "LS", "RP", "SYM", "TO", "UH", "PRP"]:
                    compounds.append(t[2][0])
        
        mods = []
        for c in compounds:
            mods.append(get_modifier(triples, c))
        
        compounds.append(mods)
        return compounds
    
    def get_modifier(triples, word):
        modifier = []
        for t in triples:
            if t[0][0] == word:
                if regexpMod.search(t[1]):
                    modifier.append(t[2][0])

        return modifier

    #Get grammar Triples from Stanford Parser
    result = dependency_parser.raw_parse(sentence)
    dep = next(result)  # get next item from the iterator result
    
    #Get word-root or "topic"
    root = [dep.root["word"]]
    root.append(get_compounds(dep.triples(), root[0]))
    root.append(get_modifier(dep.triples(), root[0]))
    
    subj = []
    obj = []
    lastNounA = ""
    lastNounB = ""
    
    for t in dep.triples():
        if regexpSubj.search(t[1]):
            subj.append(t[2][0])
            subj.append(get_compounds(dep.triples(), t[2][0]))
        if regexpObj.search(t[1]):
            obj.append(t[2][0])
            obj.append(get_compounds(dep.triples(), t[2][0]))
        if regexpNouns.search(t[0][1]):
            lastNounA = t[0][0]
        if regexpNouns.search(t[2][1]):
            lastNounB = t[2][0]
    
    return (list(helpers.flatten([root])), list(helpers.flatten([subj])),
            list(helpers.flatten([obj])), list(helpers.flatten([lastNounA])),
            list(helpers.flatten([lastNounB])))
Example #32
    selectlist = []

    while flag:
        URL = 'https://www.fashion-press.net/snaps/sex/mens?page=' + str(page)
        soup = openURL(URL)
        snaps = soup.find_all('div',attrs={'class': 'fp_media_tile snap_media col_3'})

        if len(snaps) != 0:  # if the page has photos, collect the brands
            tmpBrandList = getBrandList(snaps)
            BrandList.extend(tmpBrandList)
            selectlist.extend(getselectlist(snaps))
            NameList.extend(getNameList(snaps))
            # print('get page' + str(page))
            page += 1

        else:  # no photos left, so stop
            flag = False
            # print('END')

    df = pd.DataFrame(data=BrandList, index=NameList)  # into a pandas DataFrame
    df.to_csv('StreetSnapMen.csv')
    # df = pd.read_csv('StreetSnapMen.csv', index_col = 0)

    elementlist = list(flatten(selectlist))
    set_menslist = list(set(elementlist))
    set_menslist.sort()

    with open("selectmenslist.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(set_menslist)
Example #33
 def get_positive_samples(self, blocked_set):
     return set(
         helpers.flatten([
             self.ranking[key] for key in self.ranking
             if helpers.set_in_set(key, blocked_set)
         ]))
Example #34
 def unlucky_numbers(self, drawn_numbers):
     return [n for n in flatten(self.rows) if n not in drawn_numbers]
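And a usage sketch for this last one, assuming a hypothetical bingo-style class whose self.rows is a list of number rows, reusing the flatten helper sketched at the top of the page:

class Board:
    def __init__(self, rows):
        self.rows = rows

    def unlucky_numbers(self, drawn_numbers):
        return [n for n in flatten(self.rows) if n not in drawn_numbers]


board = Board([[1, 2], [3, 4]])
print(board.unlucky_numbers({2, 3}))  # -> [1, 4]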