Example #1
File: eval.py  Project: zyDotwei/CMAML
def do_learning(model, train_iter, val_iter, iterations, task=0):
    logger = {str(i): [] for i in range(iterations)}
    loss, ppl_val, ent_b, bleu_score_b = evaluate(model,
                                                  val_iter,
                                                  model_name=config.model,
                                                  ty="test",
                                                  verbose=False)
    logger[str(0)] = [loss, ppl_val, ent_b, bleu_score_b]
    for i in range(1, iterations):
        if i < 5:
            m = "select"
        else:
            m = "selective_training"
        for j, d in enumerate(train_iter):
            _, _, _ = model.train_one_batch(d, mode=m, task=task)
        if i in range(1, 26):  # evaluate after every iteration up to 25
            loss, ppl_val, ent_b, bleu_score_b = evaluate(
                model,
                val_iter,
                model_name=config.model,
                ty="test",
                verbose=False,
                log=False,
                result_file="results/results_our " + str(i) + ".txt",
                ref_file="results/ref_our" + str(i) + ".txt",
                case_file="results/case_our" + str(i) + ".txt")
            logger[str(i)] = [loss, ppl_val, ent_b, bleu_score_b]
    return logger
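A hedged usage sketch (not from the repository) of how do_learning might be driven and how its logger dictionary can be read back; model, train_iter, and val_iter are assumed to match the interfaces used above.

# Hypothetical driver; the model and iterators are assumed to follow
# the interfaces in the snippet above.
logger = do_learning(model, train_iter, val_iter, iterations=10, task=0)

# logger maps the iteration index (as a string) to [loss, ppl, ent, bleu];
# the list stays empty for iterations where no evaluation ran.
for step, metrics in sorted(logger.items(), key=lambda kv: int(kv[0])):
    if metrics:
        loss, ppl, ent, bleu = metrics
        print("iter {}: loss={:.4f} ppl={:.2f} bleu={:.4f}".format(step, loss, ppl, bleu))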
Example #2
def do_learning(model, train_iter, val_iter, iterations):
    logger = {str(i): [] for i in range(iterations)}
    loss, ppl_val, ent_b, bleu_score_b = evaluate(model, val_iter, model_name=config.model, ty="test", verbose=False)
    logger[str(0)] = [loss, ppl_val, ent_b, bleu_score_b]
    for i in range(1, iterations):
        for j, d in enumerate(train_iter):
            _, _, _ = model.train_one_batch(d)
        if i in [1, 3, 5, 7, 10]:  # evaluate only at these iterations
            loss, ppl_val, ent_b, bleu_score_b = evaluate(model, val_iter, model_name=config.model, ty="test", verbose=False)
            logger[str(i)] = [loss, ppl_val, ent_b, bleu_score_b]
    return logger
Example #3
def train_draft():
    train_dl, val_dl, test_dl, tokenizer = get_dataloaders(
        is_small=config.small)

    if config.test:
        print("Test model", config.model)
        model = Transformer(model_file_path=config.save_path, is_eval=True)
        evaluate(model, test_dl, model_name=config.model, ty='test')
        exit(0)

    model = Summarizer(is_draft=True, tokenizer=tokenizer)
    print("TRAINABLE PARAMETERS", count_parameters(model))
    print("Use Cuda: ", config.USE_CUDA)

    best_rouge = 0
    cnt = 0
    eval_interval = 500
    for e in range(config.epochs):
        # model.train()
        print("Epoch", e)
        l = []
        pbar = tqdm(enumerate(train_dl), total=len(train_dl))
        for i, d in pbar:
            loss = model.train_one_batch(d)
            l.append(loss.item())
            pbar.set_description("TRAIN loss:{:.4f}".format(np.mean(l)))

            if i % eval_interval == 0:
                # model.eval()
                loss, r_avg = evaluate(model,
                                       val_dl,
                                       model_name=config.model,
                                       ty="train")
                # each epoch is long, so do early stopping here
                if r_avg > best_rouge:
                    best_rouge = r_avg
                    cnt = 0
                    model.save_model(loss, e, r_avg)
                else:
                    cnt += 1
                if cnt > 20:
                    break
                # model.train()
        # model.eval()
        loss, r_avg = evaluate(model,
                               val_dl,
                               model_name=config.model,
                               ty="valid")
Example #4
import os
import time
import pickle

import numpy as np
import torch
from tqdm import tqdm

def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
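A quick sanity check of count_parameters on a toy module (assumes PyTorch is available; not part of the original file):

import torch.nn as nn

toy = nn.Linear(10, 5)            # 10*5 weights + 5 biases = 55 trainable parameters
assert count_parameters(toy) == 55

toy.bias.requires_grad = False    # frozen tensors are filtered out by requires_grad
assert count_parameters(toy) == 50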

p = Tasks()

data_loader_tr, data_loader_val, data_loader_test = p.get_all_data(batch_size=config.batch_size)

if config.test:
    print("Test model", config.model)
    model = Transformer(p.vocab, model_file_path=config.save_path, is_eval=True)
    evaluate(model, data_loader_test, model_name=config.model, ty='test', verbose=True, log=True)
    exit(0)

model = Transformer(p.vocab)
print("MODEL USED",config.model)
print("TRAINABLE PARAMETERS",count_parameters(model))

best_ppl = 1000
cnt = 0
for e in range(config.epochs):
    print("Epoch", e)
    p, l = [], []  # note: rebinding p shadows the Tasks instance created above
    pbar = tqdm(enumerate(data_loader_tr), total=len(data_loader_tr))
    for i, d in pbar:
        torch.cuda.empty_cache()
        loss, ppl, _ = model.train_one_batch(d)
Example #5
File: main.py  Project: declare-lab/MIME
torch.manual_seed(1234)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(1234)

data_loader_tra, data_loader_val, data_loader_tst, vocab, program_number = prepare_data_seq(batch_size=config.batch_size)

if config.test:
    print("Test model", config.model)
    model = Train_MIME(vocab, decoder_number=program_number, model_file_path=config.saved_model_path, is_eval=True)
    if config.USE_CUDA:
        model.cuda()
    model = model.eval()

    loss_test, ppl_test, bce_test, acc_test, bleu_score_g, bleu_score_b, bleu_score_t, ref_results = evaluate(
        model, data_loader_tst, ty="test", max_dec_step=50, write_summary=True)

    file_summary = config.save_path + "output.txt"
    with open(file_summary, 'w') as the_file:
        the_file.write("EVAL\tLoss\tPPL\tAccuracy\tBleu_g\tBleu_b\tBleu_t\n")
        the_file.write(
            "{}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.2f}\t{:.2f}\t{:.2f}\n".format("test", loss_test, ppl_test, acc_test, bleu_score_g, bleu_score_b, bleu_score_t))
        for o in ref_results:
            the_file.write(o)
    exit(0)


model = Train_MIME(vocab, decoder_number=program_number)
for n, p in model.named_parameters():
    if p.dim() > 1 and (n != "embedding.lut.weight" and config.pretrain_emb):
        xavier_uniform_(p)
print("TRAINABLE PARAMETERS", count_parameters(model))
Example #6
if config.test:
    print("Test model", config.model)
    if config.model == "trs":
        model = Transformer(vocab,
                            decoder_number=program_number,
                            model_file_path=config.save_path,
                            is_eval=True)
    elif config.model == "experts":
        model = Transformer_experts(vocab,
                                    decoder_number=program_number,
                                    model_file_path=config.save_path,
                                    is_eval=True)
    if config.USE_CUDA:
        model.cuda()
    model = model.eval()
    loss_test, ppl_test, bce_test, acc_test, bleu_score_g, bleu_score_b = evaluate(
        model, data_loader_tst, ty="test", max_dec_step=50)
    exit(0)

if config.model == "trs":
    model = Transformer(vocab, decoder_number=program_number)
    for n, p in model.named_parameters():
        if p.dim() > 1 and (n != "embedding.lut.weight"
                            and config.pretrain_emb):
            xavier_uniform_(p)
elif config.model == "experts":
    model = Transformer_experts(vocab, decoder_number=program_number)
    for n, p in model.named_parameters():
        if p.dim() > 1 and (n != "embedding.lut.weight"
                            and config.pretrain_emb):
            xavier_uniform_(p)
print("MODEL USED", config.model)
Example #7
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


p = Personas()

data_loader_tr, data_loader_val, data_loader_test = \
    p.get_all_data(batch_size=config.batch_size)

if config.test:
    print("Test model", config.model)
    model = Transformer(p.vocab,
                        model_file_path=config.save_path,
                        is_eval=True)
    evaluate(model, data_loader_test, model_name=config.model, ty='test')
    exit(0)

model = Transformer(p.vocab)
print("MODEL USED", config.model)
print("TRAINABLE PARAMETERS", count_parameters(model))

best_ppl = 1000
cnt = 0
for e in range(config.epochs):
    print("Epoch", e)
    p, l = [], []  # note: rebinding p shadows the Personas instance created above
    pbar = tqdm(enumerate(data_loader_tr), total=len(data_loader_tr))
    for i, d in pbar:
        loss, ppl, _ = model.train_one_batch(d)
        l.append(loss)
Example #8
            loss, ppl, kld, bow, elbo = model.train_one_batch(
                next(data_iter), n_iter)
        #writer.add_scalars('loss', {'loss_train': loss}, n_iter)
        #writer.add_scalars('ppl', {'ppl_train': ppl}, n_iter)
        #writer.add_scalars('kld', {'kld_train': kld}, n_iter)
        #writer.add_scalars('bow', {'bow_train': bow}, n_iter)
        #writer.add_scalars('elbo', {'elbo_train': elbo}, n_iter)
        # if(config.noam):
        #     writer.add_scalars('lr', {'learning_rata': model.optimizer._rate}, n_iter)

        if (n_iter + 1) % check_iter == 0:
            model = model.eval()
            model.epoch = n_iter
            model.__id__logger = 0
            #evaluate_tra(model, data_loader_tra ,ty="valid", max_dec_step=50)
            loss_val, ppl_val, kld_val, bow_val, elbo_val, bleu_score_g, d1, d2, d3 = evaluate(
                model, data_loader_val, ty="valid", max_dec_step=50)
            # writer.add_scalars('loss', {'loss_valid': loss_val}, n_iter)
            # writer.add_scalars('ppl', {'ppl_valid': ppl_val}, n_iter)
            # writer.add_scalars('kld', {'kld_valid': kld_val}, n_iter)
            # writer.add_scalars('bow', {'bow_valid': bow_val}, n_iter)
            # writer.add_scalars('elbo', {'elbo_valid': elbo_val}, n_iter)
            model = model.train()
            best_elbo = elbo_val
            model.save_model(best_elbo, n_iter, ppl_val, 0, bleu_score_g,
                             kld_val)
            weights_best = deepcopy(model.state_dict())
            #
            # if config.model=="trs":
            #     if config.dataset=="empathetic":
            #         if n_iter>9000: break
            #     else:
        # (snippet continues from elided context: e, pbar, best_ppl, cnt,
        # all_data, and di are defined earlier in the original file)
        l.append(loss)
        p.append(ppl)
        pbar.set_description(
            "loss:{:.4f} ppl:{:.1f} total_loss:{:.4f} re_loss:{:.4f} kl_loss:{:.4f} bow_loss:{:.4f}"
            .format(loss, ppl, total_loss, re_loss, kl_loss, bow_loss))
        #pbar.set_description("loss:{:.4f} ppl:{:.1f}".format(loss,ppl))
        torch.cuda.empty_cache()
        #if i > 1:
        #break
    #break
    #for i, d in enumerate(data_loader_total):
    #model.get_graph_feature(d)
    #break
    loss, ppl_val, ent_b, bleu_score_b = evaluate(model,
                                                  data_loader_val,
                                                  all_data,
                                                  model_name=config.model,
                                                  ty="valid",
                                                  verbose=False)
    if ppl_val <= best_ppl:
        best_ppl = ppl_val
        cnt = 0
        best_model = deepcopy(model.state_dict())  # snapshot, not a live reference
        model.save_model(best_ppl, e, 0, 0, bleu_score_b, ent_b)
    else:
        cnt += 1
    if cnt > 10:
        break
#break
model.load_state_dict(best_model)
if not os.path.exists("results2/" + di + "/"):
    os.makedirs("results2/" + di + "/")
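The snippet closes with a keep-the-best-weights pattern: snapshot the state_dict whenever validation perplexity improves, then restore it after training. The same pattern in isolation, with a toy model and fake metrics (a sketch, not repository code):

import copy
import torch.nn as nn

model = nn.Linear(4, 2)                        # toy stand-in for the real model
best_ppl = float("inf")
best_state = copy.deepcopy(model.state_dict())

for ppl_val in [120.0, 90.0, 95.0, 80.0]:      # fake validation perplexities
    if ppl_val <= best_ppl:
        best_ppl = ppl_val
        # snapshot weights whenever validation improves
        best_state = copy.deepcopy(model.state_dict())

model.load_state_dict(best_state)              # restore the best snapshot

The deepcopy matters: state_dict() returns live references to the model's tensors, so a plain assignment would silently track every later update instead of freezing the best weights.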