Example #1
def main(args):
    print('Reading config...')
    config = utilities.read_json_config(args.config, utilities.Task.inference)
    print('Preparing output directory...')
    output_dir = '{}/{}/inference'.format(config['dir']['output'],
                                          config['name'])
    utilities.create_dir(output_dir)
    print('Preparing dataset...')
    datasets = prepare_dataset(config, COMBINED_COL)
    print('Preparing predictors...')
    main_predictor = utilities.load(config['model']['main']['file'])
    stringtable_predictor = utilities.load(
        config['model']['stringtable']['file'])
    prune_predictor = utilities.load(config['model']['prune']['file'])
    otyrt_predictor = utilities.load(config['model']['otyrt']['file'])

    print('Preparing other output dirs')
    cdf_dir = '{}/cdf'.format(output_dir)
    gnuplot_dir = '{}/gnuplot'.format(output_dir)
    plot_dir = '{}/plot'.format(output_dir)

    utilities.create_dir(cdf_dir)
    utilities.create_dir(gnuplot_dir)
    utilities.create_dir(plot_dir)

    pbar = tqdm(range(len(datasets)))
    for idx in pbar:
        name = config['data'][idx]['name']
        pbar.set_description(
            'Outputting performance metrics for dataset {}'.format(name))
        mse, r2 = test_predictor(datasets[idx], main_predictor,
                                 stringtable_predictor, prune_predictor,
                                 otyrt_predictor)
        pbar.set_description('Generating diffs for dataset {}'.format(name))
        diff = generate_diff(datasets[idx], main_predictor,
                             stringtable_predictor, prune_predictor,
                             otyrt_predictor)
        pbar.set_description(
            'Saving diffs for dataset {} prediction'.format(name))
        sorted_indexes = save_diff(config, cdf_dir,
                                   config['data'][idx]['name'], diff)
        pbar.set_description(
            'Creating plot for database {} prediction'.format(name))
        save_plot(config['model'], config['data'][idx], cdf_dir, gnuplot_dir,
                  plot_dir, diff, sorted_indexes)

    print('Saving combined plot')
    save_plots(config, cdf_dir, gnuplot_dir, plot_dir)
Example #2
def start_game_play(player, gameDisplay):
    w, h = pygame.display.get_surface().get_size()
    music_player = music.Music_Player()
    music_player.play_ambtrack2()
    gameDisplay.fill(config.black)
    buttons = [
        Button("BACK", config.white,
               pygame.font.Font("assets/fonts/CHILLER.ttf", 70), (90, 60),
               gameDisplay)
    ]
    gui = pygame.Surface((w, h))
    while True:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    global paused
                    paused = True
                    utilities.pause()
                # if the S key is pressed, save the character's stats
                if event.key == pygame.K_s:
                    print('saved the game')
                    utilities.save(player)
                # if the L key is pressed, load the character's stats
                if event.key == pygame.K_l:
                    utilities.load()
                    player.pos = utilities.loaddata['pos']
                    player.health = utilities.loaddata['health']
                    player.actions = utilities.loaddata['actions']
                    player.items = utilities.loaddata['items']
                    player.hp = utilities.loaddata['hp']
            elif (event.type == pygame.QUIT):
                pygame.quit()
                quit()
            elif (event.type == pygame.MOUSEBUTTONDOWN and event.button == 1
                  and buttons[0].rect.collidepoint(pygame.mouse.get_pos())):
                pass
                # main_menu()

        # for button in buttons:
        #     Button.check_Hover(button, gameDisplay)
        pygame.display.update()
        gui.fill(config.white)

        gameDisplay.blit(gui, (0, 0))
        game_start(player, gameDisplay)
        clock.tick(15)
Example #3
    def load(folder, width, height):

        print('Loading database from {}'.format(folder))

        database_structure = utilities.get_database_structure(folder)

        database = ImageDatabase(width, height, folder)

        start_time = time.time()
        chunks = database_structure.get_list_names()
        total = len(chunks)

        print('Loading {} chunks from {}'.format(
            total, database_structure.image_folder))

        database.images = []
        for index, chunk in enumerate(chunks):
            database.images += [
                ImageItem(image, database.width, database.height)
                for image in utilities.load(chunk)
            ]
            utilities.print_progress(index + 1, total)
            if settings.MAX_CHUNKS_USE and settings.MAX_CHUNKS_USE == index + 1:
                print('Reached limit for max chunks use')
                break
        utilities.print_done(time.time() - start_time)

        return database
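The loader above returns a populated ImageDatabase. A minimal usage sketch, assuming load is exposed as a static method of ImageDatabase (the folder path and the 64x64 dimensions are made up for illustration):

database = ImageDatabase.load('data/image_db', 64, 64)
print('Loaded {} image items'.format(len(database.images)))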
Example #4
def get_mc(update=False, verbose=False):

    for year in year_tags:
        fname = f'mc_{year}'
        result = load(fname)
        for dataset in datasets:
            query = 'dataset dataset=/{}/{}*/NANOAODSIM'.format(
                dataset, year_tags[year])
            samples = get_das(query, verbose=verbose)
            if not samples: continue
            thesedatasets = set(s.split('/')[1] for s in samples)
            for thisdataset in thesedatasets:
                # NOTE: manually remove QCD_Pt-
                if 'QCD_Pt-' in thisdataset: continue
                if thisdataset not in result: result[thisdataset] = {}
                sampleMap = result[thisdataset].get('files', {})
                goodsamples = []
                for sample in samples:
                    if not update and sample in sampleMap: continue
                    if 'Validation error' in sample: continue
                    if sample.split('/')[1] != thisdataset: continue
                    query = 'file dataset={}'.format(sample)
                    sampleMap[sample] = get_das(query, verbose=verbose)
                    goodsamples += [sample]

                result[thisdataset] = {
                    'datasets': goodsamples,
                    'files': sampleMap
                }
        dump(fname, result)
Example #5
    def __init__(self, account):
        """
        account is either a path to an account to load
        or a dictionary with attribute "logins" as a list of login dictionaries
        """
        from login import Login
        from queue import Queue

        argument = account
        if type(account) is str:
            argument = utilities.load(account + "/account.json")
            self.savepath = account

        for key in argument.keys():
            setattr(self, key, argument[key])
        self.queues = []  # this field is read-only, not loaded into memory

        for i in range(len(self.logins)):
            self.logins[i] = Login(self.logins[i])
            try:
                path = self.savepath + "/"
                profs = self.logins[i].profiles()
                self.queues.extend(
                    map(lambda x: Queue(x, path=path + Queue.profile_name(x)),
                        profs))
            except AttributeError:
                self.queues.extend(map(Queue, self.logins[i].profiles()))

        self.subscriptions = argument["subscriptions"]
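Per the docstring, this constructor accepts either a directory path containing an account.json or an already-loaded dictionary. A minimal sketch of both call styles, assuming the enclosing class is named Account (the class name and the path are made up for illustration):

account_from_disk = Account("accounts/alice")    # reads accounts/alice/account.json
account_from_dict = Account({"logins": [], "subscriptions": []})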
Example #6
def predict(path, X):
    a, b = load(path)
    predicted_y = []
    for x in X:
        y = a + b * x
        predicted_y.append(y)

    return predicted_y
Example #7
def predict(path, X):
    a, b = load(path)
    predicted_y = []
    for x in X:
        y = a + b * x
        predicted_y.append(y)

    predicted_y = np.array(predicted_y).reshape((len(predicted_y), 1))
    return predicted_y
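The two predict variants above evaluate the linear model y = a + b*x one sample at a time; since NumPy is already in use, the same result can be produced in one vectorized expression. A minimal sketch under the same assumption that load(path) returns the fitted (a, b) pair (predict_vectorized is a made-up name):

def predict_vectorized(path, X):
    a, b = load(path)
    # evaluate y = a + b*x for all samples at once and return an (n, 1) column
    return (a + b * np.asarray(X)).reshape(-1, 1)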
Example #8
 def load(path):
     """
     Calls __init__ of dict["definition"]["type"] where dict is json at path
     Returns Station subclass definition>type defined in stations.py
     """
     import stations
     diction = utilities.load(path)
     sta = eval("stations." + diction["type"] + "(diction)")
     sta.name = path.split("/")[-1].split(".")[0]
     return sta
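The eval call above assembles the class name as a string and evaluates it; an equivalent pattern is to look the class up on the stations module with getattr, which avoids eval. A minimal sketch, assuming stations defines a class whose name matches diction["type"] (load_station is a made-up name):

import stations
import utilities

def load_station(path):
    diction = utilities.load(path)
    cls = getattr(stations, diction["type"])  # resolve the class by name
    sta = cls(diction)
    sta.name = path.split("/")[-1].split(".")[0]
    return sta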
Example #9
    def __init__(self, profile, path=None):
        self._prof = profile

        part = path
        if not path:
            part = Queue.profile_name(profile)

        try:
            self.reserve = utilities.load(part)["reserve"]
        except IOError:
            self.reserve = []
Example #10
def maybe_dump(path, fileset):
    # if the file exists: read in the current data, and test for differences
    replace = False
    oldfileset = load(path)
    for dataset in fileset:
        oldfilelist = oldfileset.get(dataset, [])
        filelist = fileset[dataset]
        if Counter(oldfilelist) != Counter(filelist):
            replace = True
    if not replace: return
    dump(path, fileset)
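maybe_dump only rewrites the file when the list of files for some dataset has actually changed; Counter makes that comparison order-insensitive while still catching duplicates. A minimal sketch of the check in isolation (the file names are made up):

from collections import Counter

old_files = ['a.root', 'b.root']
new_files = ['b.root', 'a.root']                   # same files, different order
print(Counter(old_files) == Counter(new_files))    # True  -> nothing to dump
new_files.append('c.root')                         # a genuinely new file
print(Counter(old_files) == Counter(new_files))    # False -> dump() would run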
Example #11
    def update_existing_database(self):
        print('Updating existing database')
        meta_dict = utilities.load(self.database_meta_file)

        files_to_add = [
            file for file in self.files if file not in meta_dict['files']
        ]
        total_files = len(files_to_add)
        if total_files == 0:
            print('No new images found ... [ done ]')
            return
        database_width = meta_dict['image width']
        database_height = meta_dict['image height']
        max_chunk_size = meta_dict['chunk size']
        chunk_count = meta_dict['chunk count']
        file_type = meta_dict['file type']
        database_path = meta_dict['database path']
        cache_images = []
        num_of_cache_images = 0
        data_paths = []

        for index, file in enumerate(files_to_add):
            cache_images.append(
                DatabaseItem(file, database_width, database_height))
            num_of_cache_images += 1
            if num_of_cache_images >= max_chunk_size:
                chunk_count += 1
                data_path = database_path + os.path.sep + str(chunk_count) + file_type
                utilities.save(cache_images, data_path)
                data_paths.append(data_path)
                cache_images = []
                num_of_cache_images = 0
            utilities.print_progress(index + 1, total_files)
        utilities.print_done()

        # save remaining cached images
        if cache_images:  # if not empty
            chunk_count += 1
            data_path = database_path + os.path.sep + str(chunk_count) + file_type
            utilities.save(cache_images, data_path)
            data_paths.append(data_path)

        meta_dict['number of files'] += len(files_to_add)
        meta_dict['files'] += files_to_add
        meta_dict['paths'] += data_paths
        meta_dict['chunk count'] = chunk_count  # chunk_count already includes the previous count

        utilities.save(meta_dict, meta_dict['meta file path'])
Example #12
    print('--------------------------------')
    # convert the m4a file to mp3
    #mp3_audio = utilities.convert(audio)
    # build the wav file name and path
    wav_audio = path + "/" + name + ".wav"
    print('wav file path preset to: ' + wav_audio)
    print('--------------------------------')
    # batch-convert the mp3 files under path to wav
    #audioAnalysis.dirMp3toWavWrapper(path, 16000, 1)
    # HMM segmentation, producing a .segment file
    segFileName = path + "/" + name + ".segment"
    print('segment file path preset to: ' + segFileName)
    print('--------------------------------')
    #[flagsInd, classesAll, acc, CM] = aS.hmmSegmentation(wav_audio, "data/hmmRadioSM", segFileName, True, '')
    # use the segmentation to drop music sections longer than 100 seconds
    cmd = "mkdir " + path + "/" + name
    sh.run(cmd)

    # record the start and end time of each segment
    delimit_list = aS.segWAV(wav_audio)

    # read the transcription results
    for i in range(len(delimit_list)):
        records_list = utilities.load(path + "/" + name + "/" + name + "_" +
                                      delimit_list[i][0] + "-" +
                                      delimit_list[i][1] + ".json")

        # find the times at which the keywords appear
        feedback = utilities.feedback(records_list, keywords)
        print('check result: ' + str(feedback))
Example #13
    max_sig_idx = discovery_sig_df['ZA'].idxmax()
    max_sig_data = discovery_sig_df.iloc[max_sig_idx]
    max_sig = max_sig_data['ZA']
    max_sig_cut = max_sig_data['t_cut']
    max_sig_str = 'Max significance {:.2f} for cut value {:.4f}'.format(
        max_sig, max_sig_cut)
    print(max_sig_str)
    return {'str': max_sig_str, 'cut': max_sig_cut, 'value': max_sig}


if __name__ == '__main__':
    args = parse_args()
    X_sig, X_bkg, w_sig, w_bkg = get_test_sig_bkg()

    log.info('Loading classifier from {}'.format(args['clf']))
    clf = utilities.load(args['clf'])
    log.info('Loaded classifier\n{}'.format(clf))

    log.info('Calculating predictions...')
    p_sig = clf.decision_function(X_sig)
    log.debug(p_sig)
    log.debug('Mean: {:.5f}'.format(np.mean(p_sig)))
    p_bkg = clf.decision_function(X_bkg)
    log.debug(p_bkg)
    log.debug('Mean: {:.5f}'.format(np.mean(p_bkg)))

    ds = discovery_significance(p_sig,
                                p_bkg,
                                w_sig,
                                w_bkg,
                                t_range=(-2.5, 2.5))
Example #14
# MATHEW LEWIS, JUNE 2020

import numpy as np
import tensorflow as tf
tf.keras.backend.set_floatx('float64')
from utilities import run, load

# load the test data
x_test = np.load('data/x_test.npy')
y_test = np.load('data/y_test.npy')

# run the model on some test data
result = run(load('models/model'), x_test[:10])

# compare the model's output with the correct answer
print('correct answer:\t\t', list(y_test[:10]))
print("model's output:\t\t", result)
Example #15
#!/usr/bin/env python

__author__ = 'cclamb'

import gzip
import utilities
import timeit

f = gzip.open('mnist-sd19.pkl.gz', 'rb')
data = utilities.load(f)
f.close()
utilities.create_hdf5_archive(data)
Example #16
import numpy as np
import tensorflow as tf

tf.keras.backend.set_floatx('float64')
from utilities import run, load

# load the test data
x_test = np.load('data/x_test.npy')
y_test = np.load('data/y_test.npy')

# test the model
model = load('models/model')
score = model.evaluate(x_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
Example #17
        dr[dr < -self.box.hside] += self.box.side

        norm_dr = np.linalg.norm(dr, axis=1)

        norm_dr = norm_dr[norm_dr < self.interaction.cutoff]

        u = self.interaction.potential(norm_dr,
                                       self.interaction.parameters).sum()

        return u


# start with one particle
r = np.array([[0, 0, 0]])

r = ut.load('last_pos.dat.xyz')
sim = Simulation(r, Box([L, L, L], [0, 0, 0]),
                 Interaction(twosteps, r3, parameters))
# insert particles
# r = moves.insert_hard_spheres(sim, N, r1, r3)

# ut.write_xyz("begin.xyz", sim.r)

move = moves.LocalMove(sim, delta, beta)

# x = np.arange(0,4,0.01)
# pl.plot(x, sim.interaction.potential(x,sim.interaction.parameters))
# pl.ylim(-2,2)
# pl.show()
for it in tqdm.tqdm(range(iterations)):
Example #18
 def _load(inputs):
     return ImageItem(utilities.load(inputs[0]), inputs[1],
                      inputs[2])  # file width and height

Example #19
def maybe_dump(path, fileset):
    # if the file exists: read in the current data, and test for differences
    replace = False
    oldfileset = load(path)
    for dataset in fileset:
        oldfilelist = oldfileset.get(dataset, [])
        filelist = fileset[dataset]
        if Counter(oldfilelist) != Counter(filelist):
            replace = True
    if not replace: return
    dump(path, fileset)


fulldatafileset = load('data')
fullmcfileset = load('mc')

years = ['2016', '2017', '2018']
for year in years:
    outpath = 'filesets/{year}/{sample}'
    fileset = {}

    datafileset = {}
    for s in fulldatafileset[year]:
        thisfileset = {s: []}
        for d in fulldatafileset[year][s]['datasets']:
            thisfileset[s] += fulldatafileset[year][s]['files'][d]
        maybe_dump(outpath.format(year=year, sample=s), thisfileset)
        datafileset.update(thisfileset)
    maybe_dump(outpath.format(year=year, sample='data'), datafileset)
Example #20
    def check_and_load_database(self):

        # do some checking first
        if not os.path.isdir(self.database_path):
            raise ValueError('Database does not exist')

        if not os.path.isfile(self.database_meta_file):
            raise ValueError('Corrupted database: missing meta file')

        meta_dict = utilities.load(self.database_meta_file)
        database_chunk_size = meta_dict['chunk size']
        if database_chunk_size != settings.DATABASE_CHUNK_SIZE:
            raise ValueError(
                'Existing database has different chunk size signature | database {} | program {}'
                .format(database_chunk_size, settings.DATABASE_CHUNK_SIZE))

        if settings.DATABASE_IMAGE_WIDTH != meta_dict['image width']:
            raise ValueError(
                'Existing database has different image width signature | database {} | program {}'
                .format(settings.DATABASE_IMAGE_WIDTH,
                        meta_dict['image width']))

        if settings.DATABASE_IMAGE_HEIGHT != meta_dict['image height']:
            raise ValueError(
                'Existing database has different image height signature | database {} | program {}'
                .format(settings.DATABASE_IMAGE_HEIGHT,
                        meta_dict['image height']))

        # now process the database
        num_of_database_images = meta_dict['number of files']
        num_of_files_found = len(self.files)

        if num_of_database_images != num_of_files_found:

            if num_of_database_images == 0:
                self.create_and_load_database()
                return

            while True:
                print('Files found: {} | Database size: {}'.format(
                    num_of_files_found, num_of_database_images))
                print('[1] use existing database')
                print('[2] create new database')
                print('[3] update existing database')
                res = input('Please enter a number:')
                if res == '1':
                    break
                elif res == '2':
                    self.create_and_load_database()
                    return
                elif res == '3':
                    self.update_existing_database()
                    meta_dict = utilities.load(
                        self.database_meta_file)  # update for below use
                    break
                print('Please enter 1, 2, or 3')

        images_list_paths = meta_dict['paths']
        images_list_size = len(images_list_paths)

        print('Loading database | {} | chunks {} | size per chunk {}'.format(
            self.database_path, images_list_size, meta_dict['chunk size']))

        self.loaded_image_items = []

        for index, images_path in enumerate(images_list_paths):
            try:
                database_items = utilities.load(images_path)
            except AttributeError as e:
                raise ValueError(
                    'Database Object has different signature, did you change your code structure? | {}'
                    .format(e))
            self.loaded_image_items += [
                ImageItem(item, self.width, self.height)
                for item in database_items
            ]
            utilities.print_progress(index + 1, images_list_size)
        utilities.print_done()