Example 1
def beamsearch_hp(datapath, benchmark, backbone, thres, alpha, logpath,
                  candidate_base, candidate_layers, beamsize, maxdepth):
    r"""Implementation of beam search for hyperpixel layers"""

    # 1. Model and dataset initialization
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = hpflow.HyperpixelFlow(backbone, '0', benchmark, device)
    download.download_dataset(os.path.abspath(datapath), benchmark)
    dset = download.load_dataset(benchmark, datapath, thres, device, 'val')
    dataloader = DataLoader(dset, batch_size=1, num_workers=0)

    # 2. Search for the k-best base layers
    membuf_cand = []
    for base in candidate_base:
        start = time.time()
        hyperpixel = parse_layers(base)
        score = evaluate.run(datapath, benchmark, backbone, thres, alpha,
                             hyperpixel, logpath, True, model, dataloader)
        log_evaluation(base, score, time.time() - start)
        membuf_cand.append((score, base))
    membuf_topk = find_topk(membuf_cand, beamsize)
    score_sel, layer_sel = find_topk(membuf_cand, 1)[0]
    log_selected(0, membuf_topk)

    # 3. Iteratively grow layer combinations with beam search
    for depth in range(1, maxdepth):
        membuf_cand = []
        for _, test_layer in membuf_topk:
            for cand_layer in candidate_layers:
                if cand_layer not in test_layer and cand_layer > min(test_layer):
                    test_layers = sorted(test_layer + [cand_layer])
                    # Skip combinations already proposed from another beam element
                    if test_layers in [cand[1] for cand in membuf_cand]:
                        continue
                    start = time.time()
                    hyperpixel = parse_layers(test_layers)
                    score = evaluate.run(datapath, benchmark, backbone, thres, alpha,
                                         hyperpixel, logpath, True, model, dataloader)

                    log_evaluation(test_layers, score, time.time() - start)
                    membuf_cand.append((score, test_layers))

        membuf_topk = find_topk(membuf_cand, beamsize)
        score_tmp, layer_tmp = find_topk(membuf_cand, 1)[0]

        if score_tmp > score_sel:
            layer_sel = layer_tmp
            score_sel = score_tmp
        log_selected(depth, membuf_topk)

    # 4. Log best layer combination and validation performance
    logging.info('\nBest layers, score: %s %5.3f' % (layer_sel, score_sel))

    return layer_sel
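
The snippet relies on project helpers (parse_layers, log_evaluation, log_selected, find_topk) that are defined elsewhere. A minimal, hypothetical sketch of find_topk consistent with how it is used above, ranking (score, layers) tuples by score, might be:

import heapq

def find_topk(membuf, k):
    # Hypothetical sketch (the real helper lives in the project):
    # keep the k highest-scoring (score, layers) candidates.
    return heapq.nlargest(k, membuf, key=lambda x: x[0])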
Example 2
def partial_train_and_decode_and_eval():
    # Train on growing prefixes (10%, 20%, ..., 100%) of the corpus,
    # then decode the test set and evaluate against the gold standard.
    file_length = file_len('heb-pos.train')
    print(file_length)
    tenth = file_length // 10
    print(tenth)
    for n in range(1, 11):
        print(n)
        with open("heb-pos.train") as train_file:
            head = [next(train_file) for _ in range(tenth * n)]
        with open("exps/partial.train", "w") as partial:
            partial.writelines(head)
        train.run('2', "exps/partial.train", 'y')
        decode.run('2', 'heb-pos.test', 'exps/hmm-part-smooth-y.lex',
                   'exps/hmm-part-smooth-y.gram')
        evaluate.run('results/hmm.tagged', 'heb-pos.gold', '2', 'y')
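
Here file_len is a helper not shown in the snippet; a plausible minimal sketch is:

def file_len(path):
    # Hypothetical helper: count the lines in a text file.
    with open(path) as f:
        return sum(1 for _ in f)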
Example 3
def evaluate_returns(config):
    """
    Plot the returns over checkpoints of a trained model
    :param config: The config for the model
    :return: N/A
    """
    checkpoints = np.arange(1, config.trained_episodes, config.step)
    return_eps = np.zeros(checkpoints.shape[0] + 1)  # checkpoints + final
    return_good_agents = np.zeros(checkpoints.shape[0] + 1)
    return_adversary_agents = np.zeros(checkpoints.shape[0] + 1)
    return_agents = []

    # Disable gif saving during evaluation
    config.save_gifs = False

    for i in range(checkpoints.shape[0]):
        incremental = checkpoints[i]
        run_config = copy.deepcopy(config)
        run_config.incremental = incremental
        total_return, agent_return, good_returns, adversary_returns = run(
            run_config)
        return_eps[i] = total_return
        return_good_agents[i] = good_returns
        return_adversary_agents[i] = adversary_returns
        return_agents.append(agent_return)

    # Evaluate the final model (use a fresh copy of the base config rather
    # than the last checkpoint's run_config; assumes run() without an
    # `incremental` attribute loads the final weights)
    final_config = copy.deepcopy(config)
    total_return, agent_return, good_returns, adversary_returns = run(
        final_config)
    return_eps[-1] = total_return
    return_good_agents[-1] = good_returns
    return_adversary_agents[-1] = adversary_returns
    return_agents.append(agent_return)

    # Plot the returns
    # plot_agents_return(np.array(return_agents), np.append(checkpoints, checkpoints[-1] + config.step), config)
    plot_return(return_adversary_agents,
                np.append(checkpoints, checkpoints[-1] + config.step), config,
                "Adversary")
Example 4
def evaluate_run_returns(run_configs):
    """
    Evaluate the returns over a number of run_configs;
    :return:
    """
    return_eps = np.zeros(len(run_configs))  # checkpoints + final
    return_good_agents = np.zeros(len(run_configs))
    return_adversary_agents = np.zeros(len(run_configs))
    return_agents = []
    for i, run_config in enumerate(run_configs):
        total_return, agent_return, good_returns, adversary_returns = run(
            run_config)
        return_eps[i] = total_return
        return_good_agents[i] = good_returns
        return_adversary_agents[i] = adversary_returns
        return_agents.append(agent_return)
    return return_eps, return_agents, return_good_agents, return_adversary_agents
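
A hypothetical driver, assuming run_configs is a list of config objects prepared elsewhere by the caller:

# Hypothetical usage; run_configs must be constructed by the caller.
totals, per_agent, good, adversary = evaluate_run_returns(run_configs)
print(totals.mean(), good.mean(), adversary.mean())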
Example 5
def train(data, config):
    V_record = []  # trained V from each fold
    acc_record = []  # validation accuracy per fold
    nr_fold = config.getint('data', 'nfold')
    lamda = config.getfloat('model', 'lamda')
    sim_scale = config.getfloat('model', 'sim_scale')
    Xtr = data['Xtr']
    Ytr = data['Ytr']
    fold_loc = data['fold_loc']
    Sig_Y = data['Sig_Y']

    # test fold
    # fold_loc = [[0,1],[1,4],[2,4],[1,3],[0,5]]

    #print "before: Xtr", Xtr.shape
    for j in range(nr_fold):
        print "-----------fold: ", j, " ---------"

        #print "delete loc ", j, len(fold_loc[j])
        Xbase = np.delete(Xtr, fold_loc[j], axis=0)
        Ybase = np.delete(Ytr, fold_loc[j], axis=0)
        Xval = Xtr[fold_loc[j]]
        Yval = Ytr[fold_loc[j]]

        Sim_base = Compute_Sim(Sig_Y, Ybase, Ybase, sim_scale)

        #print "xbase:", Xbase.shape, " xtr:", Xtr.shape, " Ybase:", Ybase.shape, " Ytr:", Ytr.shape
        V = learn.learning(Sim_base, Xbase, Ybase, lamda)
        acc = evaluate.run(Sig_Y, Xval, Yval, Xbase, Ybase, V, sim_scale)
        V_record.append(V)
        acc_record.append(acc)
    print(acc_record)

    # average accuracy across folds
    acc = 0.0
    acc2 = 0.0
    for accs in acc_record:
        acc += accs[0]
        acc2 += accs[1]
    print(acc / nr_fold, acc2 / nr_fold)
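
Compute_Sim and learn.learning come from the surrounding project and are not shown. A rough, hypothetical sketch of Compute_Sim, assuming the labels index rows of the class-signature matrix Sig_Y and the result is a row-normalized RBF similarity matrix:

import numpy as np
from scipy.spatial.distance import cdist

def Compute_Sim(Sig_Y, Y_a, Y_b, sim_scale):
    # Hypothetical sketch: RBF similarity between the class signatures
    # selected by the labels in Y_a and Y_b, normalized per row.
    sig_a = Sig_Y[np.unique(Y_a)]
    sig_b = Sig_Y[np.unique(Y_b)]
    dist = cdist(sig_a, sig_b, 'euclidean')
    sim = np.exp(-(dist ** 2) * sim_scale)
    return sim / sim.sum(axis=1, keepdims=True)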
Example 6
        for e in range(epochs):
            saved = False
            loss = train(engine=engine,
                         prot_data=prot_data,
                         morph_data=morph_data,
                         acro_data=acro_data,
                         para_data=para_data)

            eval_dict = {
                'protein': {
                    'data': prot_val_data,
                    'tests': ['loss', 'auc']
                }
            }
            results = evaluate.run(engine, eval_dict)

            # if no protein data is given, the `or` short-circuits and the
            # checkpoint is always saved
            if (args['--protein_data'] == ''
                    or results['protein']['loss'] < best_vloss):
                if len(prot_data) > 0:
                    saved = True
                    best_pEncoder = engine.pEncoder.state_dict()
                    torch.save(engine.pEncoder.state_dict(),
                               f'files/{sid}.protein.pkl')
                    best_vloss = results['protein']['loss']
                if len(morph_data) > 0:
                    best_mSeq2Seq = engine.mSeq2Seq.state_dict()
                    torch.save(engine.mSeq2Seq.state_dict(),
                               f'files/{sid}.morpheme.pkl')
                if len(acro_data) > 0:
Example 7
import argparse
import sys

# project modules pre, train, and eval (used below) are assumed to be
# imported elsewhere in the original file
from configs import CONFIG
from lib import errors as e

if __name__ == "__main__":
    print("\nTranscribe all the music...\n")

    num_of_args = len(sys.argv)
    if num_of_args != 9:  # script name plus four flag/value pairs
        e.print_usage()
        sys.exit()

    arg_parser = argparse.ArgumentParser(description='Get run specs.')
    arg_parser.add_argument('-m', dest='mode', required=True)
    arg_parser.add_argument('-model', dest='model', required=True)
    arg_parser.add_argument('-c', dest='dataset_config', required=True)
    arg_parser.add_argument('-t', dest='transform_type', required=True)
    args = arg_parser.parse_args()

    dataset_id = args.dataset_config + "_" + args.transform_type
    experiment_id = dataset_id + "_" + args.model
    if args.mode == 'preprocess' and e.is_valid_args(CONFIG, args):
        pre.run(CONFIG, args, dataset_id)
    elif args.mode == 'train' and e.is_valid_args(CONFIG, args):
        train.run(CONFIG, args, dataset_id, experiment_id)
    elif args.mode == 'evaluate' and e.is_valid_args(CONFIG, args):
        eval.run(CONFIG, args, dataset_id, experiment_id)
    else:
        e.print_usage()
    sys.exit()
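
For reference, a hypothetical invocation that satisfies the nine-token check (script name plus four flag/value pairs; the file and value names below are made up):

# python transcribe.py -m train -model crnn -c maps -t cqt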

Example 8
def run_eval(self):
    config = self.CONFIG
    version = config['version']
    evaluate.run(version)
    return None
Example 9
def eval(gold_file, predicted_file, **args):
    evaluate.run(gold_file, predicted_file, **args)
Example 10
def run():
    train.run()
    evaluate.run()