Example 1
    def make_private_params(self, run):
        """Initializes many experiment specific variables.
        
        Parameters
        ----------
            run : int
                Number of the current experiment.

        Returns
        -------
            private_params : dict
                Directory that contains all experiment specific settings.
        """
        private_params = {
            'run': run,
            'seed': self.params['random_seed'][run - 1],
            'device': '/gpu:*' if self.params['use_gpu'] else '/cpu:*',
            'start_era': 0,
            'current_era': 0,
            'current_episode': 0,
            'current_step': 0,
            'eval_episodes': 0,
            'eval_steps': 0,
            'cumulative_gamma': math.pow(self.params['gamma'],
                                         self.params['update_horizon'])
        }
        private_paths = {}
        private_paths['base_dir'] = str(
            Path(
                os.path.join(
                    self.params['exp_dir'],
                    'run_{:02d}_s{}'.format(run, private_params['seed']))))
        private_paths['ckpt_dir'] = str(
            Path(os.path.join(private_paths['base_dir'], 'checkpoints')))
        private_paths['log_dir'] = str(
            Path(os.path.join(private_paths['base_dir'], 'logs')))
        private_paths['var_file'] = str(
            Path(os.path.join(private_paths['base_dir'], 'vars.yaml')))
        if self.params['source_dir'] is not None:
            util.copy_dir(self.params['source_dir'], private_paths['base_dir'])  # pylint: disable=no-member
        return private_params, private_paths
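
A minimal usage sketch for the method above, assuming a hypothetical experiment instance whose params dict also carries a 'num_runs' entry; the loop and directory creation below are illustrative only and not part of the original snippet:

import os

for run in range(1, experiment.params['num_runs'] + 1):
    private_params, private_paths = experiment.make_private_params(run)
    # create the per-run directory tree described by the returned paths
    os.makedirs(private_paths['ckpt_dir'], exist_ok=True)
    os.makedirs(private_paths['log_dir'], exist_ok=True)
    print('run {} on {} with seed {}'.format(
        private_params['run'], private_params['device'],
        private_params['seed']))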
Example 2
    with open(tree_file, 'w') as f:
      print( tree, file=f )

    # reestimate model params for the given tree
    cur_modelfile = raxml_launcher.evaluate(tree_file, ref_msa, cur_outdir)

    # place outgroup
    placement.launch_epa( tree_file, cur_modelfile, ref_msa, query_msa, cur_outdir, thorough=True )

    result_files.append( os.path.join( cur_outdir, "epa_result.jplace") )

# ================================================================
# post-analysis
# ================================================================
result_dir = paths.epa_rooting_dir
util.make_path_clean( result_dir )
# do the summary high level stats
placement.outgroup_check( result_files, result_dir )
# also generate the lwr histograms
hist_csv_file = placement.gappa_examine_lwr( epa_out_dir, result_dir )
placement.ggplot_lwr_histogram( hist_csv_file, result_dir)

# ================================================================
# finally, export the results
# ================================================================
# also export the individual results
for f in result_files:
  d = os.path.dirname( f )
  util.copy_dir( d, os.path.join( result_dir, os.path.basename(d) ), ["*.rba", "*.phy", "*.startTree"] )
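
The three-argument copy_dir call above passes a list of glob patterns; the project's util module is not shown here, so the following is only a sketch of one plausible, shutil-based implementation in which the patterns select which files are copied (both the name copy_dir_with_patterns and the select-rather-than-ignore semantics are assumptions):

import fnmatch
import os
import shutil

def copy_dir_with_patterns(src, dst, patterns=None):
    # recursively copy src into dst; when patterns is given, copy only
    # files whose names match at least one of the glob patterns
    os.makedirs(dst, exist_ok=True)
    for name in os.listdir(src):
        src_path = os.path.join(src, name)
        dst_path = os.path.join(dst, name)
        if os.path.isdir(src_path):
            copy_dir_with_patterns(src_path, dst_path, patterns)
        elif patterns is None or any(fnmatch.fnmatch(name, p) for p in patterns):
            shutil.copy2(src_path, dst_path)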

Example 3
def test_copy_dir():
    util.copy_dir('d1', 'd3', True)
    return 'copy OK'
Example 4
def test_delete():
    util.copy_file('a.txt', 'a.txt.bak')
    util.copy_dir('d1', 'd1_bak')
    util.delete('a.txt')
    util.delete('d1', force=True)
    return 'delete OK'
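
The util helpers exercised by these tests are not shown; purely as a sketch of the call shapes used here, built on shutil and os, and with the force flag taken to mean recursive removal of non-empty directories (an assumption):

import os
import shutil

def copy_file(src, dst):
    # plain file copy that preserves metadata
    shutil.copy2(src, dst)

def delete(path, force=False):
    # remove a file, or a directory; force=True allows non-empty directories
    if os.path.isdir(path):
        if force:
            shutil.rmtree(path)
        else:
            os.rmdir(path)  # fails unless the directory is already empty
    elif os.path.exists(path):
        os.remove(path)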
Example 5
def main():
    torch.multiprocessing.set_sharing_strategy('file_system')
    print('[RUN] parse arguments')
    args, framework, optimizer, data_loader_dict, tester_dict = option.parse_options()

    print('[RUN] create result directories')
    result_dir_dict = util.create_result_dir(args.result_dir, ['src', 'log', 'snapshot', 'test'])
    util.copy_file(args.bash_file, args.result_dir)
    util.copy_dir('./src', result_dir_dict['src'])

    print('[RUN] create loggers')
    train_log_dir = os.path.join(result_dir_dict['log'], 'train')
    train_logger = SummaryWriter(train_log_dir)

    print('[OPTIMIZER] learning rate:', optimizer.param_groups[0]['lr'])
    n_batches = len(data_loader_dict['train'])
    global_step = args.training_args['init_iter']

    print('')
    skip_flag = False  # set when a training step returns no loss; the next iteration skips the snapshot/test/lr checks once
    while True:
        start_time = time.time()
        for train_data_dict in data_loader_dict['train']:
            batch_time = time.time() - start_time

            if skip_flag:
                skip_flag = False
            else:
                if global_step in args.snapshot_iters:
                    snapshot_dir = os.path.join(result_dir_dict['snapshot'], '%07d' % global_step)
                    util.save_snapshot(framework.network, optimizer, snapshot_dir)

                if global_step in args.test_iters:
                    test_dir = os.path.join(result_dir_dict['test'], '%07d' % global_step)
                    util.run_testers(tester_dict, framework, data_loader_dict['test'], test_dir)

                if args.training_args['max_iter'] <= global_step:
                    break

                if global_step in args.training_args['lr_decay_schd']:
                    util.update_learning_rate(optimizer, args.training_args['lr_decay_schd'][global_step])

            train_loss_dict, train_time = \
                train_network_one_step(args, framework, optimizer, train_data_dict, global_step)

            if train_loss_dict is None:
                skip_flag = True
                train_data_dict.clear()
                del train_data_dict

            else:
                if global_step % args.training_args['print_intv'] == 0:
                    iter_str = '[TRAINING] %d/%d:' % (global_step, args.training_args['max_iter'])
                    info_str = 'n_batches: %d, batch_time: %0.3f, train_time: %0.3f' % \
                               (n_batches, batch_time, train_time)
                    train_str = util.cvt_dict2str(train_loss_dict)
                    print(iter_str + '\n- ' + info_str + '\n- ' + train_str + '\n')

                    for key, value in train_loss_dict.items():
                        train_logger.add_scalar(key, value, global_step)

                train_loss_dict.clear()
                train_data_dict.clear()
                del train_loss_dict, train_data_dict
                global_step += 1

            start_time = time.time()
        if args.training_args['max_iter'] <= global_step:
            break
    train_logger.close()