Code example #1
def main_work():

    #################################################

    # ============= Process command line ============

    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-ncores',
                   default=1,
                   type=int,
                   help='Number of cores for parallel processing')
    opts = a.parse_args()

    # ===============================================

    hp = load_config(opts.config)
    assert hp.attention_guide_dir

    dataset = load_data(hp)
    fpaths, text_lengths = dataset['fpaths'], dataset['text_lengths']

    assert os.path.exists(hp.coarse_audio_dir)
    safe_makedir(hp.attention_guide_dir)

    executor = ProcessPoolExecutor(max_workers=opts.ncores)
    futures = []
    for (fpath, text_length) in zip(fpaths, text_lengths):
        futures.append(executor.submit(proc, fpath, text_length, hp))
    proc_list = [future.result() for future in tqdm.tqdm(futures)]
Code example #2
def run():
    config = configuration.load_config("config/config.yml")
    db = save_solution(config.get("", ""))
    solutions = get_solutions(config.get("sizeBoard", ""))
    _keys = solutions.keys()
    for x in _keys:
        if len(solutions[x]) > 0:
            db.insert_solution_by_n(solutions[x], x)
Code example #3
def main_work():

    #################################################

    # ============= Process command line ============

    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-ncores',
                   default=1,
                   type=int,
                   help='Number of cores for parallel processing')
    opts = a.parse_args()

    # ===============================================

    hp = load_config(opts.config)

    fpaths = sorted(glob.glob(hp.waveforms + '/*.wav'))

    safe_makedir(hp.coarse_audio_dir)
    safe_makedir(hp.full_audio_dir)
    safe_makedir(hp.full_mel_dir)

    executor = ProcessPoolExecutor(max_workers=opts.ncores)
    futures = []
    for fpath in fpaths:
        futures.append(executor.submit(proc, fpath, hp))
    proc_list = [future.result() for future in tqdm.tqdm(futures)]
Code example #4
File: synthesize.py  Project: thetobysiu/ophelia
def main_work():

    #################################################
      
    # ============= Process command line ============

    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-speaker', default='', type=str)
    a.add_argument('-N', dest='num_sentences', default=0, type=int)
    a.add_argument('-babble', action='store_true')
    a.add_argument('-ncores', type=int, default=1, help='Number of CPUs for Griffin-Lim stage')
    a.add_argument('-odir', type=str, default='', help='Alternative place to put output samples')

    a.add_argument('-t2m_epoch', default=-1, type=int, help='Default: use latest (-1)')
    a.add_argument('-ssrn_epoch', default=-1, type=int, help='Default: use latest (-1)')
    
    opts = a.parse_args()
    
    # ===============================================
    hp = load_config(opts.config)
    
    outdir = opts.odir
    if outdir:
        outdir = os.path.join(outdir, basename(opts.config))

    if hp.multispeaker:
        assert opts.speaker, 'Please specify a speaker from speaker_list with -speaker flag'
        assert opts.speaker in hp.speaker_list

    if opts.babble:
        babble(hp, num_sentences=opts.num_sentences)
    else:
        synthesize(hp, speaker_id=opts.speaker, num_sentences=opts.num_sentences, \
                ncores=opts.ncores, topoutdir=outdir, t2m_epoch=opts.t2m_epoch, ssrn_epoch=opts.ssrn_epoch)
Code example #5
def main_work():

    #################################################
      
    # ======== Get stuff from command line ==========

    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-ncores', type=int, default=1)
    opts = a.parse_args()
    
    # ===============================================

    hp = load_config(opts.config)
    
    ### 1) convert saved coarse mels to mags with latest-trained SSRN
    print('mel2mag: restore last saved SSRN')
    g = SSRNGraph(hp,  mode="synthesize")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        
        ## TODO: use restore_latest_model_parameters from synthesize?
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'SSRN') 
        saver2 = tf.train.Saver(var_list=var_list)
        savepath = hp.logdir + "-ssrn"        
        latest_checkpoint = tf.train.latest_checkpoint(savepath)
        if latest_checkpoint is None: sys.exit('No SSRN at %s?'%(savepath))
        ssrn_epoch = latest_checkpoint.strip('/ ').split('/')[-1].replace('model_epoch_', '')
        saver2.restore(sess, latest_checkpoint)
        print("SSRN Restored from latest epoch %s"%(ssrn_epoch))

        filelist = glob.glob(hp.logdir + '-t2m/validation_epoch_*/*.npy')
        filelist = [fname for fname in filelist if not fname.endswith('.mag.npy')]
        batch, lengths = make_mel_batch(hp, filelist, oracle=False)
        Z = synth_mel2mag(hp, batch, g, sess, batchsize=32)
        print ('synthesised mags, now splitting batch:')
        maglist = split_batch(Z, lengths)
        for (infname, outdata) in tqdm.tqdm(zip(filelist, maglist)):
            np.save(infname.replace('.npy','.mag.npy'), outdata)



    ### 2) GL in parallel for both t2m and ssrn validation set 
    print('GL for SSRN validation')
    filelist = glob.glob(hp.logdir + '-t2m/validation_epoch_*/*.mag.npy') + \
               glob.glob(hp.logdir + '-ssrn/validation_epoch_*/*.npy')

    if opts.ncores==1:
        for fname in tqdm.tqdm(filelist):
            synth_wave(hp, fname)
    else:
        executor = ProcessPoolExecutor(max_workers=opts.ncores)    
        futures = []
        for fpath in filelist:
            futures.append(executor.submit(synth_wave, hp, fpath))
        proc_list = [future.result() for future in tqdm.tqdm(futures)]
Code example #6
def main():
    notify_queue = queue.Queue()

    configure_logger(notify_queue)
    config = configuration.load_config()

    main_window = views.MainWindow(notify_queue, config)
    main_window.calculate_callback = handle_calculate
    main_window.save_options_callback = handle_save_options
    main_window.mainloop()
Code example #7
def option_load(**kwargs):
    """
    The code run when the load argument is given. This will load the configuration of a given name and return it.
    :param kwargs: A dictionary of arguments. This expects 'opts' as a list of options created by getopt and
                   'iterator' as the current iterator being used.
    :return: The new configuration object returned in a dictionary with the key 'config'.
    """
    opts = kwargs["opts"]
    iterator = kwargs["iterator"]
    config_name = opts[iterator.current][1]
    print("Loading {}.dat...".format(config_name))
    return {"config": configuration.load_config(config_name)}
Code example #8
def generate_report():
    broker_config = load_config("broker.ini")
    api_config = get_api_config()
    api = Api(api_config)

    street_orders = api.get_street_orders()

    for street_order in street_orders:
        parent_order = api.get_parent_order(street_order.parent_id)
        formatted_entry = format_oats(street_order, parent_order,
                                      broker_config)

        yield formatted_entry
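Because generate_report() is a generator, a caller streams its entries rather than collecting them first. A small consumption sketch, where the output path and the one-entry-per-line text format are assumptions:

def write_report(path='oats_report.txt'):
    # Hypothetical consumer: write each formatted OATS entry on its own line.
    with open(path, 'w') as report_file:
        for entry in generate_report():
            report_file.write('%s\n' % entry)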
Code example #9
File: run_experiments.py  Project: niksaz/sim2real
def main():
    original_config_path = 'configs/unit/duckietown_unit.yaml'
    config = configuration.load_config(original_config_path)
    iterations = 100000
    channels = 16
    tag_to_update_params_items = []
    for map_label, map_path in [
        ('DALP', 'home/zerogerc/msazanovich/aido3/data/daffy_loop_empty'),
        ('DAIS', 'home/zerogerc/msazanovich/aido3/data/daffy_udem1')
    ]:
        for use_tcc_loss in [True, False]:
            for use_triplet_loss in [True, False]:
                if use_tcc_loss:
                    tcc_w = 0.1
                else:
                    tcc_w = 0.0
                if use_triplet_loss:
                    triplet_w = 0.1
                    triplet_margin = 1.0
                else:
                    triplet_w = 0.0
                    triplet_margin = 0.0
                tag = (
                    f'T46-{map_label}2DUCK-{channels}'
                    f'-TCC-{tcc_w}-TRIPLET-{triplet_w}-MAR-{triplet_margin}-{iterations_to_desc(iterations)}'
                )
                update_params = {
                    'hyperparameters/loss/tcc_w': tcc_w,
                    'hyperparameters/loss/triplet_w': triplet_w,
                    'hyperparameters/loss/triplet_margin': triplet_margin,
                    'hyperparameters/iterations': iterations,
                    'hyperparameters/gen/ch': channels,
                    'hyperparameters/dis/ch': channels,
                    'datasets/general/datasets_dir': '/',
                    'datasets/domain_a/dataset_path': map_path,
                    'datasets/domain_b/dataset_path':
                        'home/zerogerc/msazanovich/duckietown-data/aido3/duckietown',
                }
                tag_to_update_params_items.append((tag, update_params))
    for index in itertools.count():
        item_index = index % len(tag_to_update_params_items)
        tag, update_params = tag_to_update_params_items[item_index]
        run_experiment(config, update_params, tag)
Code example #10
def main_work():

    #################################################
      
    # ============= Process command line ============

    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-o', dest='outdir', required=True, type=str)    
    opts = a.parse_args()
    
    # ===============================================
    
    hp = load_config(opts.config)
    copy_synth_SSRN_GL(hp, opts.outdir)
Code example #11
def main():
    config_path = Path.cwd().joinpath(CONFIGS_DIR).joinpath(CONFIG_FILENAME)
    config = load_config(config_path)

    dataset_dir = config['dataset_dir']

    print(f'Getting speakers data from {dataset_dir}')
    speakers = get_speakers_data(dataset_dir)

    csv_dir = config['csv_path']
    csv_path = Path(csv_dir).joinpath(CSV_FILENAME)

    print(f'Writing speakers data to {csv_path}')
    write_csv_file(csv_path, speakers)

    print('Done')
Code example #12
def configure():
    """
    Update config
    """
    jira_url = utils.get_input(raw_input, "Jira url")
    username = utils.get_input(raw_input, "username")
    password = utils.get_input(getpass.getpass, "password")
    error_reporting = 'n' not in raw_input(
        "Would you like to automatically report errors to help improve the software? [y]/N: ").lower()
    configuration._save_config(jira_url, username, password, error_reporting)

    try:
        connection.jira_connection(configuration.load_config())
    except jira_exceptions.JIRAError as e:
        configuration._delete_config()
        logging.error("You have an error in your jira connection/configuration: {error}. Please fix the configuration before attempting to use jtime.\n We suggest trying your username without using the email address.".format(error=e))
Code example #13
File: train.py  Project: niksaz/sim2real
def main():
    args = configuration.parse_args()
    config = configuration.load_config(args.config_path)
    utils.setup_logging()
    logging.info(f'args: {args}')
    logging.info(f'config: {config}')

    utils.fix_random_seeds(config['hyperparameters']['seed'])

    a_train_dataset, a_test_dataset, a_test_length = create_image_action_dataset(
        config, 'domain_a')
    b_train_dataset, b_test_dataset, b_test_length = create_image_action_dataset(
        config, 'domain_b')

    trainer = create_models_and_trainer(config)

    output_dir, (samples_dir, summaries_dir,
                 checkpoints_dir) = utils.create_output_dirs(
                     args.output_dir_base, 'unit', args.tag,
                     ['samples', 'summaries', 'checkpoints'])
    configuration.dump_config(config, os.path.join(output_dir, 'config.yaml'))

    checkpoint = reload_checkpoint(trainer, checkpoints_dir,
                                   config['restore_path'])
    summary_writer = tf.summary.create_file_writer(summaries_dir)

    with summary_writer.as_default():
        datasets = [(a_train_dataset, a_test_dataset),
                    (b_train_dataset, b_test_dataset)]
        test_iterations = max(a_test_length, b_test_length)
        main_loop(trainer, datasets, test_iterations, config, checkpoint,
                  samples_dir)

    if args.summarize:
        trainer.model.encoder_a.model.summary()
        trainer.model.encoder_b.model.summary()
        trainer.model.encoder_shared.model.summary()
        trainer.model.decoder_shared.model.summary()
        trainer.model.decoder_b.model.summary()
        trainer.model.decoder_a.model.summary()
        trainer.model.downstreamer.model.summary()
        trainer.controller.model.summary()
        trainer.model.dis_a.model.summary()
        trainer.model.dis_b.model.summary()
Code example #14
def main():
    config_path = Path(CONFIGS_DIR).joinpath(CONFIG_FILENAME)
    config = load_config(config_path)

    csv_dir = config['csv_path']
    csv_path = Path(csv_dir).joinpath(CSV_FILENAME)

    print(f'Reading data from {csv_path}')
    data = pd.read_csv(csv_path, header=0)

    split_category = config['split_by']
    test_ratio = config['test_ratio']

    print(f'Splitting by {split_category} with test ratio of {test_ratio}')
    split_column = data.columns.get_loc(split_category)

    print_statistics(data, split_column)

    train_set, test_set = split(data, split_column, test_ratio)

    print()
    print('TRAIN')
    print('=====')
    print_statistics(train_set, split_column)

    print()
    print('TEST')
    print('=====')
    print_statistics(test_set, split_column)

    train_csv_filename = f'train_by_{split_category.lower()}.csv'
    csv_path = Path(csv_dir).joinpath(train_csv_filename)

    print()
    print(f'Writing training set to {csv_path}')
    pd.DataFrame(train_set).to_csv(csv_path, index=False)

    test_csv_filename = f'test_by_{split_category.lower()}.csv'
    csv_path = Path(csv_dir).joinpath(test_csv_filename)

    print(f'Writing testing set to {csv_path}')
    pd.DataFrame(test_set).to_csv(csv_path, index=False)
Code example #15
    def __init__(self, *args, config_path: Optional[str] = None, **kwargs):
        """Initialize Cogsworth class."""
        HTTPServer.__init__(self, *args, **kwargs)
        self._started = int(time())
        self._requests_successful = 0
        self._requests_warning = 0
        self._requests_error = 0
        self._requests_bad = 0

        if config_path is None:
            raise TypeError('Cogsworth missing 1 required keyword argument:'
                            ' \'config_path\'')

        schema_path = Path(dirname(__file__)) / 'schemas' / \
            'configuration.cogsworth.schema.json'

        config = load_config(config_path, schema_path)

        self._scheduler = Scheduler(config)
        self._scheduler.start()
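Given the keyword check above, construction might look like the following sketch; the class name Cogsworth is taken from the error message, while the address, handler class, and config path are assumptions.

from http.server import BaseHTTPRequestHandler

# Hypothetical instantiation: the usual HTTPServer (address, handler) arguments
# plus the required config_path keyword enforced in __init__ above.
server = Cogsworth(('0.0.0.0', 8080), BaseHTTPRequestHandler,
                   config_path='config/cogsworth.json')
server.serve_forever()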
Code example #16
def main():
        
    if len(sys.argv) == 1:
        print("No image file supplied")
        sys.exit()

    if len(sys.argv) > 2:
        print("You can only open one png file at a time")
        sys.exit()

    if not os.path.exists(sys.argv[1]):
        print("Cannot open %s. No such file." % sys.argv[1])
        sys.exit()

    if not sys.argv[1].endswith(('.png', '.PNG')):
        print("Cannot open %s. Not a png file (is the file extension correct?)." % sys.argv[1])
        sys.exit()
    
    config = configuration.load_config()
    
    main_script.ScanSummaryFrame(config, False, True, from_png_file=sys.argv[1])
Code example #17
    def test_joint_train_iteration(self):
        config_path = os.path.join('configs', 'unit', 'duckietown_unit.yaml')
        config = configuration.load_config(config_path)

        trainer = train.create_models_and_trainer(config)

        a_train_dataset, a_test_dataset, a_test_length = train.create_image_action_dataset(
            config, 'domain_a')
        b_train_dataset, b_test_dataset, b_test_length = train.create_image_action_dataset(
            config, 'domain_b')

        a_dataset_iter = iter(a_train_dataset)
        b_dataset_iter = iter(b_train_dataset)
        images_a, actions_a = next(a_dataset_iter)
        images_b, _ = next(b_dataset_iter)

        time_start = time.time()
        trainer.joint_train_step(images_a, actions_a, images_b)
        time_ended = time.time()
        time_spent = time_ended - time_start
        print(f'Time spent: {time_spent:.4f}s')
Code example #18
def try_to_train(train_fn, try_block=True, overwrite=False, **kargs):
    """Wrapper for the main training function."""
    config = conf.Config(**kargs)
    config.overwrite_safety_check(overwrite)
    if config.resume_training:
        print('INFO: Resuming training from checkpoint.')
        fp = os.path.join(config.log_path, 'config.pkl')
        config = conf.load_config(fp)
        config.resume_training = True
        config.checkpoint_path = kargs.pop('log_path')
        config.lr_end = kargs.pop('lr_end')
        config.max_epoch = kargs.pop('max_epoch')
    else:
        config.save_config_to_file()
    if try_block:
        try:
            train_fn(config)
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            error_log = sys.exc_info()
            traceback_extract = tb.format_list(tb.extract_tb(error_log[2]))
            if not os.path.exists(config.log_path):
                os.makedirs(config.log_path)
            err_msg = 'Error occurred:\r\n\r\n%s\r\n' % str(error_log[0])
            err_msg += '%s\r\n%s\r\n\r\n' % (str(
                error_log[1]), str(error_log[2]))
            err_msg += '\r\n\r\nTraceback stack:\r\n\r\n'
            for entry in traceback_extract:
                err_msg += '%s\r\n' % str(entry)
            name = 'error__' + os.path.split(config.log_path)[1] + '.txt'
            with open(os.path.join(os.path.dirname(config.log_path), name),
                      'w') as f:
                f.write(err_msg)
            print('\nWARNING: An error has occurred.\n')
            print(err_msg)
            #tf.reset_default_graph()
    else:
        train_fn(config)
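A hedged sketch of invoking this wrapper: the training function body and the keyword values are invented for illustration, though the log_path, lr_end, and max_epoch keys mirror the ones popped from kargs in the resume branch above.

def dummy_train_fn(config):
    # Hypothetical training loop; a real one would read many more fields from config.
    print('training run logged to', config.log_path)

try_to_train(dummy_train_fn,
             try_block=True,
             overwrite=False,
             log_path='experiments/run01',
             lr_end=1e-5,
             max_epoch=20)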
Code example #19
File: __init__.py  Project: ska-sa/montblanc
def rime_solver_cfg(**kwargs):
    """
    Produces a SolverConfiguration object, inherited from
    a simple python dict, and containing the options required
    to configure the RIME Solver.

    Keyword arguments
    -----------------
    Any keyword arguments are inserted into the
    returned dict.

    Returns
    -------
    A SolverConfiguration object.
    """
    from configuration import (load_config, config_validator,
        raise_validator_errors)

    def _merge_copy(d1, d2):
        return { k: _merge_copy(d1[k], d2[k]) if k in d1
                                                and isinstance(d1[k], dict)
                                                and isinstance(d2[k], dict)
                                            else d2[k] for k in d2 }

    try:
        cfg_file = kwargs.pop('cfg_file')
    except KeyError as e:
        slvr_cfg = kwargs
    else:
        cfg = load_config(cfg_file)
        slvr_cfg = _merge_copy(cfg, kwargs)

    # Validate the configuration, raising any errors
    validator = config_validator()
    validator.validate(slvr_cfg)
    raise_validator_errors(validator)

    return validator.document
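The nested _merge_copy helper builds its result from the keyword-argument keys, recursing wherever both sides hold dicts and otherwise taking the keyword value. A standalone illustration with made-up dict contents:

def _merge_copy(d1, d2):
    # Same comprehension as above: iterate d2's keys, recurse into nested dicts.
    return {k: _merge_copy(d1[k], d2[k]) if k in d1
                                         and isinstance(d1[k], dict)
                                         and isinstance(d2[k], dict)
            else d2[k] for k in d2}

cfg = {'dims': {'ntime': 10, 'nchan': 64}}
overrides = {'dims': {'ntime': 20}}
print(_merge_copy(cfg, overrides))  # -> {'dims': {'ntime': 20}}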
Code example #20
def menu_option_load(config, config_list):
    """
    The code that is run when the menu option for loading a configuration from a file is selected.
    This will show the user the configurations they can load, then prompt for input on which to load.
    :param config: The current backup configuration.
    :param config_list: A string listing all the possible configurations that can be loaded.
    :return: The newly loaded configuration.
    """
    print("List of available saved configurations:")
    print(config_list)
    while True:
        # Once a valid name is entered, load that configuration
        config_name = input(
            "Enter a name of a configuration to load (enter \"end\" to return to the menu): "
        )
        if config_name == "end":
            break
        elif configuration.config_exists(config_name):
            config = configuration.load_config(config_name)
            break
        else:
            print("{} is an invalid configuration name.".format(config_name))
    return config
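A usage sketch under stated assumptions: the starting configuration name and the list_configurations() helper used to build the listing string are hypothetical, not part of the original module.

current_config = configuration.load_config('default')         # assumed starting config
config_list = '\n'.join(configuration.list_configurations())  # hypothetical helper
current_config = menu_option_load(current_config, config_list)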
Code example #21
    def test_dataset_iteration(self):
        config_path = os.path.join('configs', 'unit', 'duckietown_unit.yaml')
        config = configuration.load_config(config_path)
        a_train_dataset, a_test_dataset, a_test_length = train.create_image_action_dataset(
            config, 'domain_a')
        b_train_dataset, b_test_dataset, b_test_length = train.create_image_action_dataset(
            config, 'domain_b')

        for dataset in [
                a_train_dataset, a_test_dataset, b_train_dataset,
                b_test_dataset
        ]:
            time_start = time.time()
            for batch_tuple in dataset:
                print(
                    f'Dataset has been tested. The length of the batch tuple is {len(batch_tuple)}.'
                )
                for batch_element in batch_tuple:
                    print(batch_element.shape)
                break
            time_ended = time.time()
            time_spent = time_ended - time_start
            print(f'Time spent: {time_spent:.4f}s')
Code example #22
def run(args):
    CONFIG_DICT = configuration.load_config(args.model, args.dataset)
    globals().update(CONFIG_DICT)

    directory = make_dir_to_save_results(args.model, args.dataset,
                                         args.contamratio)
    if args.dataset == 'kdd99':
        outcome_text_file = '{}/outcome_{}_{}_contam ratio={}_replications={}.txt'.format(
            directory, args.model, args.dataset, args.contamratio, args.number)
        log_name = 'ALAD' + '_' + '{}_contam ratio={}_'.format(
            args.dataset, args.contamratio) + 'timestamp=' + '_'.join(
                [str(s) for s in list(time.localtime(time.time())[1:6])])
    elif (args.dataset == 'mnist') or (args.dataset == 'fmnist'):
        outcome_text_file = '{}/outcome_{}_{}_contam ratio={}_normal class={}_replications={}.txt'.format(
            directory, args.model, args.dataset, args.contamratio,
            args.normalclass, args.number)
        log_name = 'ALAD' + '_' + '{}_contam ratio={}_normal class={}_'.format(
            args.dataset, args.contamratio,
            args.normalclass) + 'timestamp=' + '_'.join(
                [str(s) for s in list(time.localtime(time.time())[1:6])])
    elif args.dataset == 'celeba':
        outcome_text_file = '{}/outcome_{}_{}_contam ratio={}_attribute={}_normal class={}_replications={}.txt'.format(
            directory, args.model, args.dataset, args.contamratio,
            args.attribute, args.normalclass, args.number)
        log_name = 'ALAD' + '_' + '{}_contam ratio={}_normal class={}_attribute={}_'.format(
            args.dataset, args.contamratio, args.normalclass,
            args.attribute) + 'timestamp=' + '_'.join(
                [str(s) for s in list(time.localtime(time.time())[1:6])])

    # For logging(to be fixed)
    if os.path.exists('../loggings/alad/{}'.format(args.dataset)) is False:
        os.makedirs('../loggings/alad/{}'.format(args.dataset))

    if os.path.exists('../loggings/alad/{}/{}.log'.format(
            args.dataset, log_name)) is True:
        os.remove('../loggings/alad/{}/{}.log'.format(args.dataset, log_name))
    logging.basicConfig(filename='../loggings/alad/{}/{}.log'.format(
        args.dataset, log_name),
                        level=logging.INFO)
    stderrLogger = logging.StreamHandler()
    stderrLogger.setFormatter(
        logging.Formatter(
            '[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s'
        ))
    logging.getLogger().addHandler(stderrLogger)
    logging.info('File saved: {}'.format(log_name))

    with open('{}'.format(outcome_text_file), 'w') as f:
        f.write('Corresponding log_name : {}'.format(log_name))
        f.write(
            '7 metrics are ' +
            '[accuracy, specificity, precision, recall, f1, roc_auc, auprc]' +
            '\n')

    store_performance = []
    if args.decide == 'int':
        args.number = int(args.number)
    else:
        args.number = list(map(int, args.number.split(',')))

    if type(args.number) is int:
        seed_tuple = range(args.number)
    elif type(args.number) is list:
        seed_tuple = args.number

    for random_seed in seed_tuple:
        torch.manual_seed(random_seed)
        np.random.seed(random_seed)
        tf.set_random_seed(random_seed)
        torch.cuda.manual_seed(random_seed)

        device = 'cuda'
        if not torch.cuda.is_available():
            device = 'cpu'
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        torch.cuda.set_device(args.gpu)

        logging.info('GPU NUM: %d' % args.gpu)
        logging.info('###########Data Property#############')
        logging.info('Train ratio: %d' % train_ratio)
        logging.info('Test ratio: %d' % test_ratio)
        logging.info('Validation ratio: %d' % val_ratio)
        logging.info('Contamination ratio: %d' % args.contamratio)
        logging.info('Random seed: %d' % random_seed)
        if args.dataset != 'kdd99':
            logging.info('Normal class: %d' % args.normalclass)
            if args.dataset == 'celeba':
                logging.info('Attribute for CelebA: %d' % args.attribute)
        logging.info('######################################')

        performance = alad_trainer.run(args.dataset, args.epoch, args.degree,
                                       random_seed, args.normalclass,
                                       args.enable_dzz, args.enable_sm, args.m,
                                       args.enable_early_stop, args.sn,
                                       args.contamratio, args.gpu,
                                       args.attribute, directory)

        with open('{}'.format(outcome_text_file), 'a+') as f:
            f.write('the results of seed {} : '.format(random_seed) +
                    str(performance) + '\n')

        logging.info('GPU NUM: %d' % args.gpu)
        logging.info('###########Data Property#############')
        logging.info('Train ratio: %d' % train_ratio)
        logging.info('Test ratio: %d' % test_ratio)
        logging.info('Validation ratio: %d' % val_ratio)
        logging.info('Contamination ratio: %d' % args.contamratio)
        logging.info('Random seed: %d' % random_seed)
        if args.dataset != 'kdd99':
            logging.info('Normal class: %d' % args.normalclass)
            if args.dataset == 'celeba':
                logging.info('Attribute for CelebA: %d' % args.attribute)
        store_performance.append(performance)

    store_performance = np.reshape(store_performance, (-1, 7))

    print('[accuracy, specificity, precision, recall, f1, roc_auc, auprc]')
    logging.info("Mean Outcome of %s: " % log_name)
    logging.info(np.mean(store_performance, axis=0))
    logging.info("Minimum Outcome of %s: " % log_name)
    logging.info(np.min(store_performance, axis=0))
    logging.info("Std Outcome of %s: " % log_name)
    logging.info(np.std(store_performance, axis=0))

    summerize_performance(outcome_text_file, np.mean(store_performance,
                                                     axis=0),
                          np.min(store_performance, axis=0),
                          np.std(store_performance, axis=0))
Code example #23
from .version import version as __version__
import configuration
from configuration import _branding
import backends
import storage
import utils
import sys

if sys.argv[0].endswith('%s-config' % (_branding, )):
    # Someone is calling <brandname>-config.
    # Do not try to load any configuration, as that is what they might be trying to fix!
    pass
else:
    # Load configuration
    config = configuration.load_config()
    # Get backend according to configuration
    backend = backends.get_backend(config)

    # Get functions from backend
    ls = backend.ls
    iter_ls = backend.iter_ls
    ls_se = backend.ls_se
    iter_ls_se = backend.iter_ls_se
    is_dir = backend.is_dir
    is_dir_se = backend.is_dir_se
    replicas = backend.replicas
    get_file_source = backend.get_file_source
    iter_file_sources = backend.iter_file_sources
    is_file = backend.is_file
    is_file_se = backend.is_file_se
Code example #24
import asyncio
from datetime import datetime
import discord
from discord.ext import commands
from discord.utils import get
from configuration import configuration, load_config

cfg = load_config('config.json')

bot = commands.Bot(command_prefix=commands.when_mentioned_or(cfg.prefix),
                   description=cfg.description,
                   pm_help=True)


def log(message):
    print(f'{str(datetime.now())[:-7]}: {message}', flush=True)


@bot.event
async def on_ready():
    log('Connected!')
    log(f'Username: {bot.user.name}')
    log(f'ID: {bot.user.id}')


@bot.event
async def on_message_edit(before, after):
    if not after.channel.guild:
        log(f'Direct message>{str(after.author)}: "{before.content}" --> "{after.content}"'
            )
    else:
Code example #25
from configuration import load_config
from data_load import *
import numpy as np
from synthesize import *
from synthesize_with_latent_space import compute_opensmile_features, load_features, load_embeddings
from architectures import *
import soundfile as sf

conf_file = '/home/noetits/doctorat_code/ophelia/config/blizzard_unsupervised_letters.cfg'
hp = load_config(conf_file)
model_type = 'unsup'
logdir = hp.logdir + "-" + model_type
method = 'pca'


def synthesize_speech_laugh(hp):
    text1 = 'this is just your ima'
    text2 = 'imagination'
    tts = tts_model(hp)
    Y, Z, alignments = tts.synthesize(text=text1)
    Y2, Z2, alignments = tts.synthesize(text=text2)


##########################################

# test opensmile
conf_path = './tools/opensmile-2.3.0/config/gemaps/eGeMAPSv01a.conf'
conf_name = conf_path.split('/')[-1].split('.')[0]
#text_to_phonetic(text='this is a text')
dataset = load_data(hp, mode='demo', audio_extension='.flac')
#dataset=load_data(hp)
Code example #26
def main_work():
    # ============= Process command line ============
    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-m',
                   dest='model_type',
                   required=True,
                   choices=['t2m', 'unsup'])
    a.add_argument('-t',
                   dest='task',
                   required=True,
                   choices=[
                       'acoustic_analysis', 'compute_codes', 'reduce_codes',
                       'compute_opensmile_features', 'show_plot', 'ICE_TTS',
                       'ICE_TTS_server'
                   ])
    a.add_argument('-r',
                   dest='reduction_method',
                   required=False,
                   choices=['pca', 'tsne', 'umap'])
    a.add_argument('-p', dest='port', required=False, type=int, default=5000)
    a.add_argument('-s', dest='set', required=False, type=str, default='train')
    opts = a.parse_args()
    print('opts')
    print(opts)
    # ===============================================
    model_type = opts.model_type
    method = opts.reduction_method
    hp = load_config(opts.config)
    logdir = hp.logdir + "-" + model_type
    port = opts.port

    mode = opts.set

    config_name = opts.config.split('/')[-1].split('.')[0]

    logger_setup.logger_setup(logdir)
    info('Command line: %s' % (" ".join(sys.argv)))
    print(logdir)
    task = opts.task
    if task == 'compute_codes':
        if model_type == 't2m':
            g = Text2MelGraph(hp, mode="synthesize")
            print("Graph 1 (t2m) loaded")
        elif model_type == 'unsup':
            g = Graph_style_unsupervised(hp, mode="synthesize")
            print("Graph 1 (unsup) loaded")
        codes = compute_unsupervised_embeddings(hp, g, model_type, mode=mode)
        save_embeddings(codes, logdir, mode=mode)
        #emo_cats=get_emo_cats(hp)
        #save(emo_cats, logdir, filename='emo_cats')
    elif task == 'reduce_codes':
        try:
            embed = load_embeddings(logdir, mode=mode)[:, 0, :]
        except IndexError:  # I may have changed the shape of the matrix ...
            embed = load_embeddings(logdir, mode=mode)
        #import pdb;pdb.set_trace()
        model, results = embeddings_reduction(embed, method=method)
        save_embeddings(results,
                        logdir,
                        filename='emo_codes_' + method,
                        mode=mode)
        save(model, logdir, filename='code_reduction_model_' + method)
    elif task == 'compute_opensmile_features':
        compute_opensmile_features(hp, audio_extension='.wav', mode=mode)
    elif task == 'show_plot':
        embed = load_embeddings(logdir, filename='emo_codes_' + method)
        scatter_plot(embed)
    elif task == 'ICE_TTS':
        from interface import ICE_TTS
        embed = load_embeddings(logdir)[:, 0, :]
        embed_reduc = load_embeddings(logdir, filename='emo_codes_' + method)
        from PyQt5.QtWidgets import QApplication
        app = QApplication(sys.argv)
        ice = ICE_TTS(hp, embed_reduc, embed)
        ice.show()
        sys.exit(app.exec_())
    elif task == 'ICE_TTS_server':

        # import pdb;pdb.set_trace()
        from server.ice_tts_server import ICE_TTS_server
        try:
            embed = load_embeddings(logdir, mode=mode)[:, 0, :]
        except IndexError:  # I may have changed the shape of the matrix ...
            embed = load_embeddings(logdir, mode=mode)

        print('Loading embeddings')
        embed_reduc = load_embeddings(logdir,
                                      filename='emo_codes_' + method,
                                      mode=mode)

        from itertools import product
        train_codes_pca = np.load(
            os.path.join(logdir, 'emo_codes_pca_train.npy'))

        pca_model = pickle.load(
            open(os.path.join(logdir, 'code_reduction_model_pca.pkl'), 'rb'))
        min_xy = train_codes_pca.min(axis=0)
        max_xy = train_codes_pca.max(axis=0)
        xs = np.mgrid[min_xy[0]:max_xy[0]:100j]
        ys = np.mgrid[min_xy[1]:max_xy[1]:100j]
        X = np.array(list(product(xs, ys)))
        codes = pca_model.inverse_transform(X)

        # X=np.load('X.npy')
        # codes=np.load('codes.npy')

        print('Loading emo cats')
        emo_cats = get_emo_cats(hp)
        #emo_cats=load(logdir, filename='emo_cats')
        #import pdb;pdb.set_trace()
        ice = ICE_TTS_server(hp,
                             X,
                             codes,
                             emo_cats,
                             model_type=model_type,
                             port=port)
        # ice=ICE_TTS_server(hp, embed_reduc, embed, emo_cats, model_type=model_type, port=port)
        #ice=ICE_TTS_server(hp, embed_reduc, embed, model_type=model_type)
        #ice=ICE_TTS_server(hp, embed_reduc, embed, n_polar_axes=4, model_type=model_type)

    elif task == 'acoustic_analysis':

        directory = 'results/' + config_name
        if not os.path.exists(directory):
            os.makedirs(directory)

        import seaborn as sns
        from sklearn.feature_selection import SelectKBest
        from sklearn.feature_selection import f_regression
        from sklearn.linear_model import LinearRegression
        from pandas.plotting import scatter_matrix
        # from pandas.plotting._matplotlib.misc import scatter_matrix
        import matplotlib.pyplot as plt
        from scipy.stats import pearsonr
        import itertools

        print('MODE', mode)
        try:
            embed = load_embeddings(logdir, mode=mode)[:, 0, :]
            embed_valid = load_embeddings(logdir, mode='validation')[:, 0, :]
        except IndexError:  # I may have changed the shape of the matrix ...
            embed = load_embeddings(logdir, mode=mode)
            embed_valid = load_embeddings(logdir, mode='validation')

        conf_name = 'eGeMAPSv01a'
        feature_path = os.path.join(hp.featuredir, 'opensmile_features',
                                    conf_name, 'feat_df_' + mode + '.csv')
        feat_df = pd.read_csv(feature_path)
        feat_df = feat_df.drop(columns=['Unnamed: 0'])

        feature_path = os.path.join(hp.featuredir, 'opensmile_features',
                                    conf_name,
                                    'feat_df_' + 'validation' + '.csv')
        feat_df_valid = pd.read_csv(feature_path)
        #import pdb;pdb.set_trace()
        feat_df_valid = feat_df_valid.drop(columns=['Unnamed: 0'])

        feat_df = abbridge_column_names(feat_df)
        feat_df_valid = abbridge_column_names(feat_df_valid)

        # Mean normalization (with same mean and variance computed from training data)
        train_mean, train_std = feat_df.mean(), feat_df.std()
        feat_df = (feat_df - train_mean) / train_std
        feat_df_valid = (feat_df_valid - train_mean) / train_std

        model, coeff_df = regression_feat_embed(pd.DataFrame(embed), feat_df)
        corrs_embed_df = test_regression(model, pd.DataFrame(embed_valid),
                                         feat_df_valid)
        print('Correlations:')
        print(corrs_embed_df.sort_values(0)[::-1][:20])
        corrs_embed_df.sort_values(0)[::-1][:20].to_csv(directory +
                                                        '/correlations.csv')

        selected = select_features(corrs_embed_df,
                                   feat_df_valid,
                                   intra_corr_thresh=0.7,
                                   corr_thresh=0.3)
        print(selected.to_latex().replace('\_sma3', ' ').replace(
            'nz',
            '').replace('\_',
                        '').replace('amean',
                                    'mean').replace('semitoneFrom27.5Hz', ''))
        selected.to_csv(directory + '/selected_correlations.csv')

        # print('Gradients:')
        # print(coeff_df)

        #method='pca'

        embed_reduc = load_embeddings(logdir,
                                      filename='emo_codes_' + method,
                                      mode=mode)
        embed_reduc_valid = load_embeddings(logdir,
                                            filename='emo_codes_' + method,
                                            mode='validation')

        model_reduc, coeff_reduc_df = regression_feat_embed(
            pd.DataFrame(embed_reduc), feat_df)
        corrs_embed_reduc_df = test_regression(model_reduc,
                                               pd.DataFrame(embed_reduc_valid),
                                               feat_df_valid)
        print('Correlations:')
        print(corrs_embed_reduc_df.sort_values(0)[::-1][:20])
        corrs_embed_reduc_df.sort_values(0)[::-1][:20].to_csv(
            directory + '/correlations_reduc.csv')

        selected_reduc = select_features(corrs_embed_reduc_df,
                                         feat_df_valid,
                                         intra_corr_thresh=0.7,
                                         corr_thresh=0.25)
        print(selected_reduc.to_latex().replace('\_sma3', ' ').replace(
            'nz', '').replace('\_', '').replace('amean', 'mean').replace(
                'semitoneFrom27.5Hz', ''))
        selected_reduc.to_csv(directory + '/selected_correlations_reduc.csv')

        feat_predictions_df = pd.DataFrame(model.predict(embed))
        feat_predictions_df.index = feat_df.index
        feat_predictions_df.columns = feat_df.columns

        feat_df[selected.index]
        feat_predictions_df[selected.index]

        # just checking it seems correct
        # print(pearsonr(feat_df[selected.index]['F0semitoneFrom27.5Hz_sma3nz_percentile50.0'],feat_predictions_df[selected.index]['F0semitoneFrom27.5Hz_sma3nz_percentile50.0'] ))

        # selected_feats=selected.index.to_list()
        # fig, axs = plt.subplots(nrows=sc.shape[0], ncols=sc.shape[1], figsize=(100, 100))
        # for pair in itertools.product(range(len(selected)), repeat=2):
        #     x=feat_df[selected_feats[pair[0]]]
        #     y=feat_predictions_df[selected_feats[pair[1]]]
        #     axs[pair[0], pair[1]].scatter(x, y, alpha=0.2)
        # fig.savefig('figures/scatter_matrix.png')

        h = 100
        selected_feats = selected.index.to_list()
        fig, axs = plt.subplots(nrows=len(selected),
                                ncols=1,
                                figsize=(h / len(selected) * 3, h))
        for i in range(len(selected)):
            x = feat_df[selected_feats[i]]
            y = feat_predictions_df[selected_feats[i]]
            axs[i].scatter(x, y, alpha=0.2)
        fig.savefig(directory + '/scatter_plots_feats.png')

        #print(corrs_embed_reduc_df)
        print('Gradients:')
        print(coeff_reduc_df)
        coeff_reduc_df.to_csv(directory + '/gradients.csv')

        normalized_gradients = coeff_reduc_df.div(
            ((coeff_reduc_df**2).sum(axis=1))**0.5, axis=0)

        plt.cla()
        plt.clf()
        plt.close()
        # sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        sc = scatter_plot(embed_reduc, c=feat_df['F0 mean'].values)
        plot_gradients(normalized_gradients,
                       selected_reduc,
                       ax=sc.get_figure().gca())
        sc.get_figure().savefig(directory + '/scatter_F0_mean_' + method +
                                '.png')

        plt.cla()
        plt.clf()
        plt.close()
        # sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        sc = scatter_plot(embed_reduc, c=feat_df['F0 percentile50.0'].values)
        plot_gradients(normalized_gradients,
                       selected_reduc,
                       ax=sc.get_figure().gca())
        sc.get_figure().savefig(directory + '/scatter_F0_percentile50.0_' +
                                method + '.png')

        print(feat_df.columns)
        # import pdb;pdb.set_trace()
        plt.cla()
        plt.clf()
        plt.close()
        # sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        sc = scatter_plot(embed_reduc,
                          c=feat_df['F3amplitudeLogRelF0 stdNorm'].values)
        plot_gradients(normalized_gradients,
                       selected_reduc,
                       ax=sc.get_figure().gca())
        sc.get_figure().savefig(directory +
                                '/scatter_F3amplitudeLogRelF0_stdNorm_' +
                                method + '.png')

        plt.cla()
        plt.clf()
        plt.close()
        # sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        sc = scatter_plot(embed_reduc,
                          c=feat_df['stdVoicedSegmentLengthSec'].values)
        plot_gradients(normalized_gradients,
                       selected_reduc,
                       ax=sc.get_figure().gca())
        sc.get_figure().savefig(directory +
                                '/scatter_stdVoicedSegmentLengthSec_' +
                                method + '.png')

        plt.cla()
        plt.clf()
        plt.close()
        hist = sns.distplot(feat_df['F0 mean'])
        hist.get_figure().savefig(directory + '/hist_F0_mean_' + method +
                                  '.png')

        # hist=sns.distplot(feat_df['F3amplitudeLogRelF0 stddevNorm'])
        # hist.get_figure().savefig('figures/hist_F3amplitudeLogRelF0_stddevNorm_'+method+'.png')

        #mi=mi_regression_feat_embed(pd.DataFrame(embed_reduc), feat_df)
        #print('mi',mi.sort_values(0)[::-1][:20])
        #print('mi',mi.sort_values(1)[::-1][:20])

        # Plot corrs heatmaps
        plt.close()
        corrs_heatmap_feats = sns.heatmap(feat_df.corr().abs(),
                                          xticklabels=False)
        corrs_heatmap_feats.get_figure().savefig(directory +
                                                 '/corrs_heatmap_feats.pdf',
                                                 bbox_inches='tight')

        plt.close()
        embed_corr = pd.DataFrame(embed).corr().abs()
        embed_corr_heatmap = sns.heatmap(embed_corr)
        embed_corr_heatmap.get_figure().savefig(directory +
                                                '/embed_corr_heatmap.pdf',
                                                bbox_inches='tight')

        plt.close()
        corr_feat_embed = pd.concat([pd.DataFrame(embed), feat_df],
                                    axis=1).corr().abs()
        sns.set(font_scale=0.2)
        corr_feat_embed_heatmap = sns.heatmap(corr_feat_embed,
                                              xticklabels=False)
        # add_margin(corr_feat_embed_heatmap,x=0.1,y=0.0)
        corr_feat_embed_heatmap.get_figure().savefig(
            directory + '/corr_feat_embed_heatmap.pdf', bbox_inches='tight')

    else:
        print('Wrong task, does not exist')
Code example #27
def main_work():

    #################################################

    # ============= Process command line ============
    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-m',
                   dest='model_type',
                   required=True,
                   choices=['t2m', 'ssrn', 'babbler'])
    opts = a.parse_args()

    # ===============================================
    model_type = opts.model_type
    hp = load_config(opts.config)
    logdir = hp.logdir + "-" + model_type
    logger_setup.logger_setup(logdir)
    info('Command line: %s' % (" ".join(sys.argv)))

    ### TODO: move this to its own function somewhere. Can be used also at synthesis time?
    ### Prepare reference data for validation set:  ### TODO: alternative to holding in memory?
    dataset = load_data(hp, mode="validation")
    valid_filenames, validation_text = dataset['fpaths'], dataset['texts']

    speaker_codes = validation_duration_data = position_in_phone_data = None  ## defaults
    if hp.multispeaker:
        speaker_codes = dataset['speakers']
    if hp.use_external_durations:
        validation_duration_data = dataset['durations']

    ## take random subset of validation set to avoid 'This is a librivox recording' type sentences
    random.seed(1234)
    v_indices = list(range(len(valid_filenames)))
    random.shuffle(v_indices)
    v = min(hp.validation_sentences_to_evaluate, len(valid_filenames))
    v_indices = v_indices[:v]

    if hp.multispeaker:  ## now come back to this after v computed
        speaker_codes = np.array(speaker_codes)[v_indices].reshape(-1, 1)
    if hp.use_external_durations:
        validation_duration_data = validation_duration_data[v_indices, :, :]

    valid_filenames = np.array(valid_filenames)[v_indices]
    validation_mags = [np.load(hp.full_audio_dir + os.path.sep + basename(fpath)+'.npy') \
                                for fpath in valid_filenames]
    validation_text = validation_text[v_indices, :]
    validation_labels = None  # default
    if hp.merlin_label_dir:
        validation_labels = [np.load("{}/{}".format(hp.merlin_label_dir, basename(fpath)+".npy")) \
                              for fpath in valid_filenames ]
        validation_labels = list2batch(validation_labels, hp.max_N)

    if 'position_in_phone' in hp.history_type:

        def duration2position(duration, fractional=False):
            ### very roundabout -- need to deflate A matrix back to integers:
            duration = duration.sum(axis=0)
            #print(duration)
            # sys.exit('evs')
            positions = durations_to_position(duration, fractional=fractional)
            ###positions = end_pad_for_reduction_shape_sync(positions, hp)
            positions = positions[0::hp.r, :]
            #print(positions)
            return positions

        position_in_phone_data = [duration2position(dur, fractional=('fractional' in hp.history_type)) \
                        for dur in dataset['durations'][v_indices]]
        position_in_phone_data = list2batch(position_in_phone_data, hp.max_T)

    if model_type == 't2m':
        validation_mels = [np.load(hp.coarse_audio_dir + os.path.sep + basename(fpath)+'.npy') \
                                    for fpath in valid_filenames]
        validation_inputs = validation_text
        validation_reference = validation_mels
        validation_lengths = None
    elif model_type == 'ssrn':
        validation_inputs, validation_lengths = make_mel_batch(
            hp, valid_filenames)
        validation_reference = validation_mags
    else:
        info(
            'Undefined model_type {} for making validation inputs -- supply dummy None values'
            .format(model_type))
        validation_inputs = None
        validation_reference = None

    ## Get the text and mel inputs for the utts you would like to plot attention graphs for
    if hp.plot_attention_every_n_epochs and model_type == 't2m':  #check if we want to plot attention
        # TODO do we want to generate and plot attention for validation or training set sentences??? modify attention_inputs accordingly...
        attention_inputs = validation_text[:hp.num_sentences_to_plot_attention]
        attention_mels = validation_mels[:hp.num_sentences_to_plot_attention]
        attention_mels = np.array(
            attention_mels)  #TODO should be able to delete this line...?
        attention_mels_array = np.zeros(
            (hp.num_sentences_to_plot_attention, hp.max_T, hp.n_mels),
            np.float32)  # create fixed size array to hold attention mels
        for i in range(hp.num_sentences_to_plot_attention
                       ):  # copy data into this fixed sized array
            attention_mels_array[
                i, :attention_mels[i].shape[0], :attention_mels[i].
                shape[1]] = attention_mels[i]
        attention_mels = attention_mels_array  # rename for convenience

    ## Map to appropriate type of graph depending on model_type:
    AppropriateGraph = {
        't2m': Text2MelGraph,
        'ssrn': SSRNGraph,
        'babbler': BabblerGraph
    }[model_type]

    g = AppropriateGraph(hp)
    info("Training graph loaded")
    synth_graph = AppropriateGraph(hp, mode='synthesize', reuse=True)
    info(
        "Synthesis graph loaded"
    )  #reuse=True ensures that 'synth_graph' and 'attention_graph' share weights with training graph 'g'
    attention_graph = AppropriateGraph(hp,
                                       mode='generate_attention',
                                       reuse=True)
    info("Atttention generating graph loaded")
    #TODO is loading three graphs a problem for memory usage?

    if 0:
        print(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Text2Mel'))
        ## [<tf.Variable 'Text2Mel/TextEnc/embed_1/lookup_table:0' shape=(61, 128) dtype=float32_ref>, <tf.Variable 'Text2Mel/TextEnc/C_2/conv1d/kernel:0' shape=(1, 128, 512) dtype=float32_ref>, ...

    ## TODO: tensorflow.python.training.supervisor deprecated: --> switch to tf.train.MonitoredTrainingSession
    sv = tf.train.Supervisor(logdir=logdir,
                             save_model_secs=0,
                             global_step=g.global_step)

    ## Get the current training epoch from the name of the model that we have loaded
    latest_checkpoint = tf.train.latest_checkpoint(logdir)
    if latest_checkpoint:
        epoch = int(
            latest_checkpoint.strip('/ ').split('/')[-1].replace(
                'model_epoch_', ''))
    else:  #did not find a model checkpoint, so we start training from scratch
        epoch = 0

    ## If save_every_n_epochs > 0, models will be stored here every n epochs and not
    ## deleted, regardless of validation improvement etc.:--
    safe_makedir(logdir + '/archive/')

    with sv.managed_session() as sess:
        if 0:  ## Set to 1 to debug NaNs; at tfdbg prompt, type:    run -f has_inf_or_nan
            ## later:    lt  -f has_inf_or_nan -n .*AudioEnc.*
            os.system('rm -rf {}/tmp_tfdbg/'.format(logdir))
            sess = tf_debug.LocalCLIDebugWrapperSession(sess,
                                                        dump_root=logdir +
                                                        '/tmp_tfdbg/')

        if hp.initialise_weights_from_existing:
            info('=====Initialise some variables from existing model(s)=====')
            sess.graph._unsafe_unfinalize(
            )  ## !!! https://stackoverflow.com/questions/41798311/tensorflow-graph-is-finalized-and-cannot-be-modified/41798401
            for (scope, checkpoint) in hp.initialise_weights_from_existing:
                var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                             scope)
                info('----From existing model %s:----' % (checkpoint))
                if var_list:  ## will be empty when training t2m but looking at ssrn
                    saver = tf.train.Saver(var_list=var_list)
                    saver.restore(sess, checkpoint)
                    for var in var_list:
                        info('   %s' % (var.name))
                else:
                    info('   No variables!')
                info(
                    '========================================================')

        if hp.restart_from_savepath:  #set this param to list: [path_to_t2m_model_folder, path_to_ssrn_model_folder]
            # info('Restart from these paths:')
            info(hp.restart_from_savepath)

            # assert len(hp.restart_from_savepath) == 2
            restart_from_savepath1, restart_from_savepath2 = hp.restart_from_savepath
            restart_from_savepath1 = os.path.abspath(restart_from_savepath1)
            restart_from_savepath2 = os.path.abspath(restart_from_savepath2)

            sess.graph._unsafe_unfinalize(
            )  ## !!! https://stackoverflow.com/questions/41798311/tensorflow-graph-is-finalized-and-cannot-be-modified/41798401
            sess.run(tf.global_variables_initializer())

            print('Restore parameters')
            if model_type == 't2m':
                var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                             'Text2Mel')
                saver1 = tf.train.Saver(var_list=var_list)
                latest_checkpoint = tf.train.latest_checkpoint(
                    restart_from_savepath1)
                saver1.restore(sess, latest_checkpoint)
                print("Text2Mel Restored!")
            elif model_type == 'ssrn':
                var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'SSRN') + \
                           tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'gs')
                saver2 = tf.train.Saver(var_list=var_list)
                latest_checkpoint = tf.train.latest_checkpoint(
                    restart_from_savepath2)
                saver2.restore(sess, latest_checkpoint)
                print("SSRN Restored!")
            epoch = int(
                latest_checkpoint.strip('/ ').split('/')[-1].replace(
                    'model_epoch_', ''))
            # TODO: this counter won't work if training restarts in same directory.
            ## Get epoch from gs?

        loss_history = []  #any way to restore loss history too?

        #plot attention generated from freshly initialised model
        if hp.plot_attention_every_n_epochs and model_type == 't2m' and epoch == 0:  # ssrn model doesn't generate alignments
            get_and_plot_alignments(
                hp, epoch - 1, attention_graph, sess, attention_inputs,
                attention_mels, logdir +
                "/alignments")  # epoch-1 refers to freshly initialised model

        current_score = compute_validation(
            hp,
            model_type,
            epoch,
            validation_inputs,
            synth_graph,
            sess,
            speaker_codes,
            valid_filenames,
            validation_reference,
            duration_data=validation_duration_data,
            validation_labels=validation_labels,
            position_in_phone_data=position_in_phone_data)
        info('validation epoch {0}: {1:0.3f}'.format(epoch, current_score))

        while True:
            progress_bar_text = '%s/%s; ep. %s' % (hp.config_name, model_type,
                                                   epoch)
            for batch_in_current_epoch in tqdm(range(g.num_batch),
                                               total=g.num_batch,
                                               ncols=80,
                                               leave=True,
                                               unit='b',
                                               desc=progress_bar_text):
                gs, loss_components, _ = sess.run(
                    [g.global_step, g.loss_components, g.train_op])
                loss_history.append(loss_components)

            ### End of epoch: validate?
            if hp.validate_every_n_epochs:
                if epoch % hp.validate_every_n_epochs == 0:

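                    # Summarise the training losses accumulated since the last
                    # validation: per-component mean followed by per-component std.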
                    loss_history = np.array(loss_history)
                    train_loss_mean_std = np.concatenate(
                        [loss_history.mean(axis=0),
                         loss_history.std(axis=0)])
                    loss_history = []

                    train_loss_mean_std = ' '.join([
                        '{:0.3f}'.format(score)
                        for score in train_loss_mean_std
                    ])
                    info('train epoch {0}: {1}'.format(epoch,
                                                       train_loss_mean_std))

                    current_score = compute_validation(
                        hp,
                        model_type,
                        epoch,
                        validation_inputs,
                        synth_graph,
                        sess,
                        speaker_codes,
                        valid_filenames,
                        validation_reference,
                        duration_data=validation_duration_data,
                        validation_labels=validation_labels,
                        position_in_phone_data=position_in_phone_data)
                    info('validation epoch {0}: {1:0.3f}'.format(
                        epoch, current_score))

            ### End of epoch: plot attention matrices? #################################
            if hp.plot_attention_every_n_epochs and model_type == 't2m' and epoch % hp.plot_attention_every_n_epochs == 0:  # ssrn model doesn't generate alignments
                get_and_plot_alignments(hp, epoch, attention_graph, sess,
                                        attention_inputs, attention_mels,
                                        logdir + "/alignments")

            ### Save end of each epoch (all but the most recent 5 will be overwritten):
            stem = logdir + '/model_epoch_{0}'.format(epoch)
            sv.saver.save(sess, stem)

            ### Check if we should archive (to files which won't be overwritten):
            if hp.save_every_n_epochs:
                if epoch % hp.save_every_n_epochs == 0:
                    info('Archive model %s' % (stem))
                    for fname in glob.glob(stem + '*'):
                        shutil.copy(fname, logdir + '/archive/')

            epoch += 1
            if epoch > hp.max_epochs:
                info('Max epochs ({}) reached: end training'.format(
                    hp.max_epochs))
                return

    print("Done")
Code example #28
0
def main_work():

    # ============= Process command line ============
    a = ArgumentParser()
    a.add_argument('-c', dest='config', required=True, type=str)
    a.add_argument('-m',
                   dest='model_type',
                   required=True,
                   choices=['t2m', 'unsup'])
    a.add_argument('-t',
                   dest='task',
                   required=True,
                   choices=[
                       'compute_gradients', 'compute_codes', 'reduce_codes',
                       'compute_opensmile_features', 'show_plot', 'ICE_TTS',
                       'ICE_TTS_server'
                   ])
    a.add_argument('-r',
                   dest='reduction_method',
                   required=False,
                   choices=['pca', 'tsne', 'umap'])
    a.add_argument('-p', dest='port', required=False, type=int, default=5000)
    opts = a.parse_args()
    print('opts')
    print(opts)
    # ===============================================
    model_type = opts.model_type
    method = opts.reduction_method
    hp = load_config(opts.config)
    logdir = hp.logdir + "-" + model_type
    port = opts.port

    mode = 'validation'
    logger_setup.logger_setup(logdir)
    info('Command line: %s' % (" ".join(sys.argv)))
    print(logdir)
    task = opts.task
    if task == 'compute_codes':
        if model_type == 't2m':
            g = Text2MelGraph(hp, mode="synthesize")
            print("Graph 1 (t2m) loaded")
        elif model_type == 'unsup':
            g = Graph_style_unsupervised(hp, mode="synthesize")
            print("Graph 1 (unsup) loaded")
        codes = compute_unsupervised_embeddings(hp, g, model_type, mode=mode)
        save_embeddings(codes, logdir, mode=mode)
        #emo_cats=get_emo_cats(hp)
        #save(emo_cats, logdir, filename='emo_cats')
    elif task == 'reduce_codes':
        try:
            embed = load_embeddings(logdir, mode=mode)[:, 0, :]
        except IndexError:  # I may have changed the shape of the matrix ...
            embed = load_embeddings(logdir, mode=mode)
        #import pdb;pdb.set_trace()
        model, results = embeddings_reduction(embed, method=method)
        save_embeddings(results,
                        logdir,
                        filename='emo_codes_' + method,
                        mode=mode)
        save(model, logdir, filename='code_reduction_model_' + method)
    elif task == 'compute_opensmile_features':
        compute_opensmile_features(hp, audio_extension='.wav', mode=mode)
    elif task == 'show_plot':
        embed = load_embeddings(logdir, filename='emo_codes_' + method)
        scatter_plot(embed)
    elif task == 'ICE_TTS':
        from interface import ICE_TTS
        embed = load_embeddings(logdir)[:, 0, :]
        embed_reduc = load_embeddings(logdir, filename='emo_codes_' + method)
        from PyQt5.QtWidgets import QApplication
        app = QApplication(sys.argv)
        ice = ICE_TTS(hp, embed_reduc, embed)
        ice.show()
        sys.exit(app.exec_())
    elif task == 'ICE_TTS_server':

        from server.ice_tts_server import ICE_TTS_server
        try:
            embed = load_embeddings(logdir, mode=mode)[:, 0, :]
        except IndexError:  # I may have changed the shape of the matrix ...
            embed = load_embeddings(logdir, mode=mode)

        print('Loading embeddings')
        embed_reduc = load_embeddings(logdir, filename='emo_codes_' + method)
        print('Loading emo cats')
        emo_cats = get_emo_cats(hp)
        #emo_cats=load(logdir, filename='emo_cats')
        #import pdb;pdb.set_trace()
        ice = ICE_TTS_server(hp,
                             embed_reduc,
                             embed,
                             emo_cats,
                             model_type=model_type,
                             port=port)
        #ice=ICE_TTS_server(hp, embed_reduc, embed, model_type=model_type)
        #ice=ICE_TTS_server(hp, embed_reduc, embed, n_polar_axes=4, model_type=model_type)

    elif task == 'compute_gradients':
        import seaborn as sns
        print('MODE', mode)
        try:
            embed = load_embeddings(logdir, mode=mode)[:, 0, :]
        except IndexError:  # I may have changed the shape of the matrix ...
            embed = load_embeddings(logdir, mode=mode)

        conf_name = 'eGeMAPSv01a'
        feature_path = os.path.join(hp.featuredir, 'opensmile_features',
                                    conf_name, 'feat_df_' + mode + '.csv')
        feat_df = pd.read_csv(feature_path)
        feat_df = feat_df.drop(columns=['Unnamed: 0'])

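        # Relate the openSMILE features to the embedding dimensions by regression,
        # obtaining per-feature correlations and regression coefficients.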
        corrs_embed_df, coeff_df = regression_feat_embed(
            pd.DataFrame(embed), feat_df)
        print('Correlations:')
        #print(corrs_embed_df)
        # print('Gradients:')
        # print(coeff_df)
        # corrs_heatmap=sns.heatmap(feat_df.corr())
        # corrs_heatmap.get_figure().savefig('corrs_heatmap.png')

        print(corrs_embed_df.sort_values(0)[::-1][:20])

        #method='pca'

        embed_reduc = load_embeddings(logdir,
                                      filename='emo_codes_' + method,
                                      mode=mode)

        corrs_embed_reduc_df, coeff_reduc_df = regression_feat_embed(
            pd.DataFrame(embed_reduc), feat_df)
        print('Correlations:')
        #print(corrs_embed_reduc_df)
        #print('Gradients:')
        #print(coeff_reduc_df)

        print(corrs_embed_reduc_df.sort_values(0)[::-1][:20])

        #sc=scatter_plot(embed_reduc, c=feat_df['F0semitoneFrom27.5Hz_sma3nz_amean'].values)
        #sc.get_figure().savefig('scatter_'+method+'.png')

        mi = mi_regression_feat_embed(pd.DataFrame(embed_reduc), feat_df)

        print('mi', mi.sort_values(0)[::-1][:20])
        print('mi', mi.sort_values(1)[::-1][:20])

    else:
        print('Unknown task: {}'.format(task))
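Note: the 'reduce_codes' task above calls an embeddings_reduction() helper that is not shown in this example. A minimal sketch of what such a helper could look like, assuming scikit-learn and umap-learn are installed (illustrative only, not the project's actual implementation):

from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import umap


def embeddings_reduction_sketch(embed, method='pca', n_components=2):
    """Fit a low-dimensional reduction on an (n_samples, n_dims) embedding matrix."""
    if method == 'pca':
        model = PCA(n_components=n_components)
    elif method == 'tsne':
        model = TSNE(n_components=n_components)
    elif method == 'umap':
        model = umap.UMAP(n_components=n_components)
    else:
        raise ValueError('Unknown reduction method: %s' % method)
    reduced = model.fit_transform(embed)
    return model, reduced

Returning the fitted model alongside the reduced codes matches the calling convention used above (model, results = embeddings_reduction(embed, method=method)).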
Code example #29
0
def main():
    """
    Main application loop
    """
    # Argument parsing
    parser = argparse.ArgumentParser(description=PROGDESCR, prog=PROGNAME)
    parser.add_argument('-v',
                        '--version',
                        help='Print version and exit.',
                        action='version',
                        version=VERSION)

    args = parser.parse_args()
    logger.debug("CLI arguments: '{}'".format(args))

    ini = config.load_config(PROGNAME)
    if ini is None:
        return

    # Configure logging on file
    config.config_logging(ini, PROGNAME)
    logger.info("---------------------------------------------------------")
    logger.info("| '{}'  START                   ".format(PROGNAME))
    logger.info("---------------------------------------------------------")

    # Load IoT configuration
    topics = config.load_iot_config(ini)
    if topics == {}:
        logger.error("No topics to subscribe")
        return

    # Connect to the databases
    dbs = couchdb_client(ini)
    if dbs is None:
        logger.error("No DB available")
        return

    # Get the list of available topics
    topics = get_topic_list(dbs)
    logger.info("Available topics: '{}'".format(topics))

    # Thread dictionary
    threads = dict()

    # Create a thread for each topic
    for topic in topics:
        threads[topic] = TopicThread(topic, 10, dbs)
        threads[topic].start()
        logger.info("Thread '{}' started".format(threads[topic].name))

    # Thread monitoring
    still_running = True
    while still_running:
        thr_counter = 0
        for thr in threads:
            if threads[thr].is_alive():
                thr_counter += 1
            else:
                logger.info("Thread '{}' exited".format(threads[thr].name))
        if thr_counter == 0:
            still_running = False
        time.sleep(1)

    logger.info("{} exited".format(PROGNAME))
Code example #30
0
File: timetracker.py  Project: gomesr/timetracker
          assert False, "option not handled"
          
  home = os.getenv("HOME")
  configfile = home + "/.timetracker.conf"
  
  if createconfig:
      shutil.copy("%s/timetracker.conf.template" % sys.path[0], configfile)
      print("Copied default configuration file into place.")
      sys.exit(0)

  if loadconfig is not None:
      print("Loading configuration file %s" % loadconfig)
      configfile = loadconfig

  if os.path.exists(configfile):
      configuration.load_config(configfile)
      config = configuration.get_config()
      load_defaults(config)
  else:
      print("No config file found; create a new timetracker config file with the -c option")
      sys.exit(2)
  
  if config.has_option("main", "update.interval"):
      tracker_sleep = int(config.get("main", "update.interval"))
  else:
      tracker_sleep = 30 # default 30s
  
  out = config.get("main", "out.log")
  err = config.get("main", "err.log")
  pidfile = "/tmp/timetracker.pid"
  
Code example #31
0
def init():
    global configured, jira, git
    # Initialize the connectors
    configured = configuration.load_config()
    jira = connection.jira_connection(configured)
    git = git_ext.GIT()
Code example #32
0
File: testing.py  Project: wxhawkins/NestIQ
def master_test(gui):
    """
        Run automated tests for unrestricted plotting/statistics, restricted plotting/statistics,
        unsupervised learning and supervised learning.
    """

    def compare_stats(key, ref_path, test_path):
        """
            Compare two statistics files line by line and store discrepancies.

            Args:
                key (int): random number identifier for this test run
                ref_path (pathlib.Path): Path to reference statistics file
                test_path (pathlib.Path): Path to test statistics file
        """

        mismatches = dict()

        # Extract the important lines from the files provided

        with open(ref_path, "r") as ref_file, open(test_path, "r") as test_file:
            ref_lines = ref_file.readlines()
            labels = ref_lines[1].strip().split(",")
            ref_vals = ref_lines[12].strip().split(",")
            # test_vals = test_file.readlines()[10].strip().split(",")
            test_vals = test_file.read().split("\n")[12].strip().split(",")

        # Compare values
        for i, label in enumerate(labels):
            if ref_vals[i].strip() != test_vals[i].strip():
                try:
                    if float(ref_vals[i]) != float(test_vals[i]):
                        mismatches[label] = (ref_vals[i], test_vals[i])
                except ValueError:
                    mismatches[label] = (ref_vals[i], "None")

        return mismatches

    def compare_configs(ref_path, test_path):
        """
            Compare two configuration files line by line and store discrepancies.

            Args:
                ref_path (pathlib.Path): Path to reference configuration file
                test_path (pathlib.Path): Path to test configuration file
        """

        with open(ref_path, "r") as ref_file, open(test_path, "r") as test_file:
            ref_lines = ref_file.readlines()
            test_lines = test_file.readlines()
        mismatches = dict()
        for ref_line, test_line in zip(ref_lines[2:], test_lines[2:]):
            if test_line.strip() != ref_line.strip():
                try:
                    # Get line label
                    label = re.search((r"[^=]*"), ref_line).group(0)
                    # Get reference and test values
                    ref_val = re.search((r"=(.*)"), ref_line).group(1).strip()
                    test_val = re.search((r"=(.*)"), test_line).group(1).strip()
                    # Try converting and comparing as floats
                    try:
                        if float(ref_val) != float(test_val):
                            mismatches[label] = (ref_val, test_val)
                    except:
                        if ref_val != test_val:
                            mismatches[label] = (ref_val, test_val)
                except:
                    mismatches[label] = (ref_val, "None")

        return mismatches

    # Initialization
    test_dir_path = gui.master_dir_path / "testing"
    test_out_dir = test_dir_path / "temp_output"

    in_file_path = test_dir_path / "input" / "test_input_long.csv"

    # Load config file
    ref_config_path = test_dir_path / "config" / "test_config.ini"
    load_config(gui, config_file_=ref_config_path)

    # Load testing input file
    replace_entry(gui.input_file_E, in_file_path)

    # Set up output
    rand_key = str(randint(10**6, 10**7))

    # ---------------------------------Statistics----------------------------------------
    # Declare paths
    unres_ref_stats_path = test_dir_path / "stats" / "ref_stats_unrestricted_long.csv"
    res_ref_stats_path = test_dir_path / "stats" / "ref_stats_restricted_long.csv"

    # Set up text coloring
    colorama.init()

    print(f"Key = {rand_key}")

    for test_type in ("unrestricted", "restricted"):
        # Test statistics for the current mode (unrestricted or restricted)
        print(f"\n\nTesting statistics ({test_type})")
        if test_type == "restricted":
            gui.restrict_search_CB.select()
        else:
            gui.restrict_search_CB.deselect()

        ref_path = res_ref_stats_path if test_type == "restricted" else unres_ref_stats_path

        # Set up output file names
        test_stats_path = test_out_dir / f"{rand_key}_{test_type}.csv"
        test_plot_path = test_out_dir / f"{rand_key}_{test_type}.html"
        replace_entry(gui.stats_file_E, test_stats_path)
        replace_entry(gui.plot_file_E, test_plot_path)

        # Run statistical analysis
        gui.trigger_run()

        # Look for discrepancies in output files
        mismatches = dict()
        mismatches = compare_stats(rand_key, ref_path, test_stats_path)

        # Notify user of mismatched values if any
        if not mismatches:
            print(colored(f"{test_type.upper()} STATS PASSED".center(100, "-"), "green"))
        else:
            print(colored(f"{test_type.upper()} STATS FAILED".center(100, "-"), "red"))
            for key, values in mismatches.items():
                print(
                    colored(key, "yellow")
                    + ": test value of "
                    + colored(str(values[1]), "yellow")
                    + " did not match reference "
                    + colored(str(values[0]), "yellow")
                )

    # ---------------------------------Unsupervised learning--------------------------------------
    print(f"\n\nTesting unsupervised learning")

    load_config(gui, config_file_=ref_config_path)
    gui.unsupervised_learning()
    unsup_test_path = test_out_dir / f"{rand_key}_unsup_test_config.ini"
    unsup_ref_path = test_dir_path / "config" / "unsup_ref_config.ini"
    save_config(gui, out_file=str(unsup_test_path))

    # Search for config discrepancies
    mismatches = dict()
    mismatches = compare_configs(unsup_ref_path, unsup_test_path)

    if not mismatches:
        print(colored("UNSUP PASSED".center(100, "-"), "green"))
    else:
        print(colored("UNSUP FAILED".center(100, "-"), "red"))
        for key, values in mismatches.items():
            print(
                colored(key, "yellow")
                + ": test value of "
                + colored(str(values[1]), "yellow")
                + " did not match reference "
                + colored(str(values[0]), "yellow")
            )

    # ---------------------------------Supervised learning----------------------------------------
    print(f"\n\nTesting supervised learning")

    vertex_file_path = test_dir_path / "plots" / "vertex_selection.html"

    # Attempt to make vertex selection plot
    try:
        gui.select_vertices()
    except:
        print(colored("VERTEX SELECTION PLOT FAILED".center(100, "-"), "red"))
        traceback.print_exc()

    replace_entry(gui.vertex_file_E, vertex_file_path)

    load_config(gui, config_file_=ref_config_path)
    gui.supervised_learning()
    sup_test_path = test_out_dir / f"{rand_key}_sup_test_config.ini"
    sup_ref_path = test_dir_path / "config" / "sup_ref_config.ini"
    save_config(gui, out_file=str(sup_test_path))

    # Search for config discrepancies
    mismatches = dict()
    mismatches = compare_configs(sup_ref_path, sup_test_path)

    if not mismatches:
        print(colored("SUP PASSED".center(100, "-"), "green"))
    else:
        print(colored("SUP FAILED".center(100, "-"), "red"))
        for key, values in mismatches.items():
            print(
                colored(key, "yellow")
                + ": test value of "
                + colored(str(values[1]), "yellow")
                + " did not match reference "
                + colored(str(values[0]), "yellow")
            )

    # ---------------------------------Plot Editing----------------------------------------
    print(f"\n\nTesting plot editing")

    # Establish configuration
    ref_config_path = test_dir_path / "config" / "test_config.ini"
    load_config(gui, config_file_=ref_config_path)

    # Load testing input file and run in edit mode
    in_file_path = test_dir_path / "input" / "test_input_long.csv"
    replace_entry(gui.input_file_E, in_file_path)
    gui.edit_mode_CB.select()
    gui.trigger_run()

    # Declare file paths
    mod_input_path = test_dir_path / "input" / "ref_mod_plot.html"
    mod_ref_path = test_dir_path / "stats" / "ref_mod_stats.csv"

    # Fill entry boxes
    replace_entry(gui.input_file_E, mod_input_path)

    # Set up output file names
    test_mod_stats_path = test_out_dir / f"{rand_key}_modified.csv"
    test_mod_plot_path = test_out_dir / f"{rand_key}_modified.html"
    replace_entry(gui.stats_file_E, test_mod_stats_path)
    replace_entry(gui.plot_file_E, test_mod_plot_path)

    # Rerun with modified vertices
    gui.edit_mode_CB.deselect()
    gui.trigger_run()

    # Look for discrepancies in output files
    mismatches = dict()
    mismatches = compare_stats(rand_key, mod_ref_path, test_mod_stats_path)

    # Notify user of mismatched values if any
    if not mismatches:
        print(colored("PLOT EDITING PASSED".center(100, "-"), "green"))
    else:
        print(colored("PLOT EDITING FAILED".center(100, "-"), "red"))
        for key, values in mismatches.items():
            print(
                colored(key, "yellow")
                + ": test value of "
                + colored(str(values[1]), "yellow")
                + " did not match reference "
                + colored(str(values[0]), "yellow")
            )

    print(colored("TESTING COMPLETED".center(100, "-"), "blue"))
Code example #33
0
def main(cron_mode=False):
    # Set up the database handler and read the database contents.
    # cron_mode makes the bot behave like a cron job: instead of looping forever
    # on the configured update interval, it performs a single pass per invocation.
    db_handler = DB()
    db = db_handler.readDB()

    # Setup telegram client
    bot = Telegram()

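    # The configured update_interval is given in hours; convert it to seconds
    # for time.sleep() below.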
    update_interval = float(
        configuration.load_config()["update_interval"]) * 60 * 60

    while True:
        # A very lazy approach to error handling
        try:
            # Get the latest and trending research
            latest_data = scraper.get_latest()
            trending_data = scraper.get_trending()

            # Remove duplicates so we don't send papers we have sent before
            latest_data = db_handler.remove_duplicates(db["latest_research"],
                                                       latest_data)
            trending_data = db_handler.remove_duplicates(
                db["trending_research"], trending_data)

            # Get full paper info
            latest_papers = scraper.parse_papers(latest_data, "new")
            trending_papers = scraper.parse_papers(trending_data, "trending")

            # format paper info and send to Telegram
            for paper in latest_papers + trending_papers:
                abstract = paper["abstract"]
                abstract = " ".join(abstract.split())
                message = """{}

{}

URL - {}
{}
{}

Code Implementation - {}

#{} @MLpaperz""".format(paper["title"], abstract, paper["url"],
                        paper["abstract link 1"], paper["abstract link 2"],
                        paper["code"], paper["tag"])
                bot.sendMessage(message)

            # update database
            db["latest_research"] = db["latest_research"] + latest_data
            db["trending_research"] = db["trending_research"] + trending_data
            db_handler.writeDB(db)
        except Exception as e:
            print(e)
            pass

        if cron_mode:
            # Makes the loop only run once
            break

        # Notify the admin, then sleep until the next interval
        bot.alertAdmin()
        time.sleep(update_interval)