Example #1
 def run(seed):
     ans = sol_fn(seed, inp, log)
     process(inp,
             ans,
             seed,
             sc_fn,
             args.testcase,
             ignore=args.ignore,
             force=args.force)
Example #2
 def run(seed):
     ans = get_ans(seed)
     process(inp,
             ans,
             seed,
             sc_fn,
             args.testcase,
             ignore=args.ignore,
             force=args.force)
Example #3
    def get_discography(self, artist, song_name):
        """
        Return the list of albums and their track names.

        Invalid albums (as decided by `is_value_invalid()`) are not included.
        Song names are preprocessed using `process()`.

        The result is a dictionary indexed by album name and sorted by release
        date.
        """
        query = self.sp.search(f'artist:{artist} track:{song_name}',
                               type='track')
        artist_id = query['tracks']['items'][0]['artists'][0]['id']
        query = self.sp.artist_albums(artist_id, album_type='album')
        artist_albums = {}
        while query:
            for album in query['items']:
                name = process(album['name'], key='album')
                name = name.lower()
                if is_value_invalid(name, key='album'):
                    continue
                _set_release_date(album)
                elem = dict(id=album['id'], release_date=album['release_date'])
                artist_albums[name] = elem
            query = self.sp.next(query)
        sort = sorted(artist_albums.items(),
                      key=lambda x: x[1]['release_date'])
        artist_albums = dict(sort)

        album_ids = [album['id'] for album in artist_albums.values()]
        for albums, ids in chunks(artist_albums.values(), album_ids, 20):
            query = self.sp.albums(ids)

            for album, response in zip(albums, query['albums']):
                tracks = []
                while response:
                    if 'tracks' in response:
                        response = response['tracks']
                    tracks.extend(
                        process(t['name'], key='name').lower()
                        for t in response['items'])
                    response = self.sp.next(response)
                tracks = dict.fromkeys(tracks)
                tracks.pop('Unknown', None)
                album['tracks'] = list(tracks)
        return {
            k: v
            for k, v in artist_albums.items() if v.get('tracks', None)
        }
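Note: `chunks` is used by the example above but not shown. A minimal sketch of what it plausibly does, given how it is called: yield aligned pairs of slices of at most 20 items, matching the 20-ID limit of the Spotify albums endpoint queried here.

def chunks(values, ids, size):
    # Yield aligned (values, ids) slices of at most `size` elements each.
    values = list(values)
    ids = list(ids)
    for i in range(0, len(ids), size):
        yield values[i:i + size], ids[i:i + size]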
Example #4
    def _addrow(self, row, tablename):
        c = self.conn.cursor()

        # Unpack the preprocessed row fields.
        (transcript_si, transcript, decode_si,
         decode, conf, decode_time, callsrepath, acoustic_model,
         date, time, milliseconds, grammarlevel, firstname, lastname,
         oration_id, chain, store) = process(row)

        # Bail out early if the transcript column is empty;
        # files without a transcription are skipped.
        if transcript in ['', '""', ' ', None, 'NULL', '(NULL)']:
            print('No transcription for %s' % callsrepath)
            return False

        # Insert the row into the database only if it does not already exist.
        datarow_exist_status = self._check_if_row_exists(
                time, milliseconds, firstname, lastname, grammarlevel, tablename)
        if datarow_exist_status:
            print('The data row you are trying to add might already exist')
        else:
            print('Adding data row to database')
            sql_statement = '''INSERT INTO %s VALUES ("%s", "%s", "%s", "%s", %d, %d, "%s", "%s", %s, %s, %s, "%s", "%s", "%s", "%s", "%s", "%s", "%s")''' % (
                    tablename, transcript_si, transcript, decode_si,
                    decode, int(conf), int(decode_time),
                    callsrepath, acoustic_model,
                    date, time, milliseconds, grammarlevel,
                    firstname, lastname, oration_id, chain, store, row)
            try:
                c.execute(sql_statement)
            except Exception:
                print('Could not enter row to database')
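Note: the INSERT above builds SQL with % string formatting, which breaks on quotes in the data and is open to SQL injection. A sketch of a parameterized variant (placeholder syntax shown for sqlite3; other DB-API drivers use a different marker, and the table name cannot be a parameter, so it must come from trusted code):

values = (transcript_si, transcript, decode_si, decode, int(conf),
          int(decode_time), callsrepath, acoustic_model, date, time,
          milliseconds, grammarlevel, firstname, lastname, oration_id,
          chain, store, row)
placeholders = ', '.join(['?'] * len(values))
c.execute('INSERT INTO %s VALUES (%s)' % (tablename, placeholders), values)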
Example #5
def test_1():
    from util import read_file_to_list
    from util import process
    data = read_file_to_list("testinput.txt")
    terminate, acc = process(data)
    assert not terminate
    assert acc == 5
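Note: `process` comes from a `util` module that is not shown. A minimal sketch consistent with the asserted behaviour, assuming the input is a list of acc/jmp/nop instruction strings (the format used in the Advent of Code 2020 day-8 example, whose accumulator is 5 when the loop is detected):

def process(program):
    # Execute instructions until the program either runs off the end
    # (terminated) or is about to repeat an instruction (infinite loop).
    seen, acc, pc = set(), 0, 0
    while pc < len(program):
        if pc in seen:
            return False, acc
        seen.add(pc)
        op, arg = program[pc].split()
        if op == 'acc':
            acc += int(arg)
        pc += int(arg) if op == 'jmp' else 1
    return True, acc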
Example #6
    def capture(self, image_name):
        currentDir = os.getcwd()

        img_path = "%s/%s.jpg" % (currentDir, image_name)
        print('image path:' + img_path)
        self.camera.capture(img_path, use_video_port=True)
        image = util.process(img_path, assemble.get_height_each())
        self.session_images.append(image)
Example #8
def generate_victim_offender():
    victims = []
    offenders = []
    final = []
    file_path = util.find_all("people.json")
    count = 0
    with open(file_path, "r") as people_json:
        people = json.load(people_json)
        for person in people:
            if person['Victim']:
                victims.append(person['SSN'])
            else:
                offenders.append(person['SSN'])
            count += 1
    util.process(count)

    # Now pair up the two arrays.
    print(f"length of victims: {len(victims)}")
    print(f"length of offenders: {len(offenders)}")
    # There are always more victims than offenders (by construction of the
    # percentages), so removed offenders are kept in temp_offenders and
    # reused once the offenders list runs out, hence the if statement below.

    temp_offenders = []
    for victim in victims:
        length = len(offenders) - 1

        if length < 0:
            l = len(temp_offenders) - 1
            num = random.randint(0, l)
            offender = temp_offenders[num]
            temp_offenders.remove(temp_offenders[num])
            temp = [victim, offender]
            final.append(temp)

        else:
            num = random.randint(0, length)
            offender = offenders[num]
            temp_offenders.append(offenders[num])
            offenders.remove(offenders[num])
            temp = [victim, offender]
            final.append(temp)

    util.output_SQL("VICTIM_OFFENDER", final,
                    "/mysql/insertVictimOffender.sql")
    util.output_json(final, "victim_offender.json")
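Note: the remove-by-value pattern above scans the list twice and would remove the wrong entry if two people shared an SSN. Drawing without replacement via pop() is equivalent and simpler; a sketch of the else branch rewritten that way:

offender = offenders.pop(random.randrange(len(offenders)))
temp_offenders.append(offender)
final.append([victim, offender])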
Example #9
def test_2():
    from util import read_file_to_list
    from util import process
    from util import fix_command
    data = read_file_to_list("testinput.txt")
    data1 = fix_command(data, 7)
    terminate, acc = process(data1)
    assert terminate
    assert acc == 8
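Note: `fix_command` is not shown either. A sketch of what it presumably does, under the same day-8 assumption: patch the instruction at a given index by swapping jmp and nop so the program can terminate (in the day-8 example, patching index 7 yields a terminating run with accumulator 8):

def fix_command(program, idx):
    # Return a copy of the program with jmp<->nop swapped at position idx.
    op, arg = program[idx].split()
    swapped = {'jmp': 'nop', 'nop': 'jmp'}.get(op, op)
    patched = list(program)
    patched[idx] = '%s %s' % (swapped, arg)
    return patched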
Example #10
 def __iter__(self):
     tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
     tknzr = TweetTokenizer(preserve_case=False)
     stop_words = set(stopwords.words("english"))
     for paragraph in self.paragraph_iterator():
         raw_sentences = tokenizer.tokenize(paragraph.decode('utf8').strip())
         for raw_sentence in raw_sentences:
             # process the sentence
             sentence = util.process(raw_sentence, tknzr, self.process_option, stop_words)
             yield sentence
Example #11
	def readCorpus(self, corpusName):
		print('Loading corpus')
		with open(corpusName, 'r') as f:
			for tweet in f:
				tweet = tweet.strip()
				self.corpus.append(tweet)
				self.bagOfWords.append(process(tweet))
		return self.bagOfWords
Example #12
def generate_weapons():

    file_name = util.find_all("weapons.csv")
    with open(file_name) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        weapons = []

        for row in csv_reader:
            if line_count == 0:  # the first cell starts with a UTF-8 byte-order mark
                weapon = row[0][1:]
            else:
                weapon = row[0]
            fatalities = row[1]
            temp = {weapon: fatalities}
            weapons.append(temp)
            line_count += 1

        util.process(line_count)
        return weapons
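Note: the "weird UTF-8 symbol" special-cased above is a byte-order mark. Opening the file with the utf-8-sig codec strips it, so the first row needs no slicing:

with open(file_name, encoding='utf-8-sig') as csv_file:
    # The BOM is consumed by the codec, so row[0] is clean on every row.
    csv_reader = csv.reader(csv_file, delimiter=',')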
Example #14
    def fetch_album(self, song):
        """
        Get the name of the album for a song from spotify.
        """
        artist, title = song.artist, song.title
        self.fetch_discography(song)
        if not self.discography_cache.get(artist, ''):
            return 'Unknown'

        title = process(title, key='name')
        for album_name, info in self.discography_cache[artist].items():
            if title.lower() in map(str.lower, info['tracks']):
                return album_name
        return 'Unknown'
Example #15
def run():
	camera.warmup(width, height)
	images = []
	for i in range(takes):
		pic_paths = camera.take(1, width, height)
		start = time.time()
		image1 = util.process(pic_paths[0], assemble.get_height_each())
		end = time.time()
		diff = end - start
		images.append(image1)
		print('took in: %.3fs' % diff)  # time.time() deltas are seconds, not ms
		if diff < PAUSE_MAX and i < takes - 1:
			sleep_time = PAUSE_MAX - diff
			print('sleeping for another: %.3fs' % sleep_time)
			time.sleep(sleep_time)  # sleep the remainder, not the elapsed time

	assemble.assemble(images)
Example #17
 def get_album_tracks(self, song):
     """
     Get the list of tracks of the album this song belongs to.
     """
     if song.album and song.album != 'Unknown':
         song.album = process(song.album, key='album', invalid=False)
         self.fetch_discography(song)
     else:
         logger.debug('song has no embedded album. searching')
         song.album = self.fetch_album(song)
         logger.debug('got this album: %s', song.album)
     try:
         if not song.album or song.album == 'Unknown':
             raise KeyError('Album not found')
         return self.discography_cache[song.artist][song.album]['tracks']
     except KeyError:
         msg = 'Spotify could not find the list of tracks for %s'
         logger.info(msg, song)
         return []
Example #18
shuffled_pairs = training_pairs  # fall back to the unshuffled pairs
if shuffle == 1:
    shuffled_pairs = util.random_shuffle(training_pairs)

save_root = save_p + '/set%02d_%s' % (set_no, PHASE)
if HNM == 1:
    save_root = save_root + '_hnm'
print('saved to: ' + save_root)
if os.path.exists(save_root):
    shutil.rmtree(save_root)
os.makedirs(save_root)
X = np.zeros((2000, 6, 160, 80), dtype=np.float32)
map_size = X.nbytes * 1000
env = lmdb.open(save_root, map_size=map_size)

util.process(shuffled_pairs, M, N, transformer, env)

PHASE = 'test'
training_pairs = []
for setname in dataset_list:
    if dataset_info_dict[setname]['Used'] == 1:
        filename_part = dataset_info_dict[setname]['PartitionTest']
        if setname != 'MARKET':
            main_dict = dataset_info_dict[setname]['Util'].collect_data(
                dataset_info_dict[setname]['DatasetPath'])
            this_list = dataset_info_dict[setname][
                'Util'].partition_file_to_list(filename_part)
            sub_dict = util.get_sub_dict(main_dict, this_list)
        else:
            main_dict = dataset_info_dict[setname]['Util'].collect_data(
                dataset_info_dict[setname]['DatasetPath'] +
Example #19
sp.load_data()
sp.train_df['X_train'] = list(sp.X_train)
sp.train_df['y_train'] = list(sp.y_train)
train_df = sp.train_df

n_fold = 4
train_df.sort_values('coverage_class', inplace=True)
train_df['fold'] = (list(range(n_fold)) *
                    train_df.shape[0])[:train_df.shape[0]]
subsets = [train_df[train_df['fold'] == i] for i in range(n_fold)]

for fold_idx in range(n_fold):
    # get the train/val split
    X_tr, X_val, y_tr, y_val = get_train_val(subsets, fold_idx)
    # add depth information or triple the gray channel
    X_tr, y_tr = process(X_tr, y_tr, use_depth=use_depth)
    X_val, y_val = process(X_val, y_val, use_depth=use_depth)
    y_tr = np.squeeze(y_tr)
    y_val = np.squeeze(y_val)

    # prepare PyTorch dataset and dataloader
    train_ds = SaltDataset(X_tr, y_tr)
    val_ds = SaltDataset(X_val, y_val, transform=None)
    train_loader = torch.utils.data.DataLoader(train_ds,
                                               batch_size=32,
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_ds,
                                             batch_size=32,
                                             shuffle=True)

    net = UNetResNet(34)
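Note: `get_train_val` is not defined in this excerpt. A plausible sketch of a leave-one-fold-out split over the DataFrame subsets built above (pandas and numpy assumed imported as pd/np):

def get_train_val(subsets, fold_idx):
    # Hold out fold `fold_idx` for validation, train on the remaining folds.
    val = subsets[fold_idx]
    train = pd.concat(subsets[:fold_idx] + subsets[fold_idx + 1:])
    return (np.stack(train['X_train']), np.stack(val['X_train']),
            np.stack(train['y_train']), np.stack(val['y_train']))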
Example #20
 def run(solve_args):
     solve_args_orig = dict(solve_args)
     ans = get_ans(solve_args)
     process(inp, ans, solve_args_orig, sc_fn)
Example #21
 def __iter__(self):
     for document in self.field_iterator(setting.csv_option.review_name):
         yield util.process(document)
Example #22
def test_process():
    from util import process

    assert 73 == process('XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X', 11)
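Note: `process` is imported from `util` and not shown, but the asserted values match the Advent of Code 2020 day-14 example, so it presumably applies a 36-bit mask where 1 forces a bit on, 0 forces it off, and X leaves it alone. A sketch that satisfies the assert:

def process(mask, value):
    or_mask = int(mask.replace('X', '0'), 2)   # 1s in the mask force bits on
    and_mask = int(mask.replace('X', '1'), 2)  # 0s in the mask force bits off
    return (value | or_mask) & and_mask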
Example #23
def test_process():
    from util import process
    assert 0 == process([0, 3, 6], 10)
    assert 1 == process([1, 3, 2], 2020)
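Note: these values match the Advent of Code 2020 day-15 memory game (the 10th number spoken after [0, 3, 6] is 0; the 2020th after [1, 3, 2] is 1), so `process` presumably implements it. A sketch that satisfies both asserts:

def process(starting, n):
    # last_seen[v] = most recent 1-based turn on which v was spoken.
    last_seen = {v: i + 1 for i, v in enumerate(starting[:-1])}
    last = starting[-1]
    for turn in range(len(starting), n):
        nxt = turn - last_seen[last] if last in last_seen else 0
        last_seen[last] = turn
        last = nxt
    return last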
Example #24
 def summarize(self):
     for graph_id in self.dags.keys():
         latency.process('%s/data/%s/dag' % (self._local_log_dir, graph_id),
                         graph_id, '%s/summary' % (self._local_log_dir))
     util.process('%s/util' % (self._local_log_dir),
                  '%s/summary' % (self._local_log_dir))
Example #25
def test_process(name, expect):
    assert process(name, key='name') == expect
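Note: test_process takes (name, expect) parameters, so the excerpt presumably omits a pytest parametrize decorator along these lines (import pytest assumed; the sample values are purely illustrative, not taken from the real test suite):

@pytest.mark.parametrize('name, expect', [
    ('Some Song (Remastered)', 'Some Song'),  # hypothetical cleanup behaviour
])
def test_process(name, expect):
    assert process(name, key='name') == expect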
Example #26
 def run(solve_args):
     solve_args_orig = dict(solve_args)
     ans = get_ans(solve_args)  # run the solver function and get the answer
     process(inp, ans, solve_args_orig,
             sc_fn)  # add to best submission or not