# --- Code example #1 ---
# 0
# File: train.py  Project: zequnl/Ontology_generation
# Report how many weights will actually be optimised.
print("TRAINABLE PARAMETERS",count_parameters(model))

# Early-stopping state: lowest validation perplexity seen so far, and the
# number of consecutive epochs that failed to improve on it.
best_ppl = 1000
cnt = 0

for e in range(config.epochs):
    print("Epoch", e)
    p, l = [], []  # running per-batch perplexity / loss history this epoch
    pbar = tqdm(enumerate(data_loader_tr), total=len(data_loader_tr))
    for i, d in pbar:
        # Release cached GPU memory on both sides of the training step.
        torch.cuda.empty_cache()
        loss, ppl, _ = model.train_one_batch(d)
        l.append(loss)
        p.append(ppl)
        # The progress bar shows running means, not the last batch alone.
        mean_loss, mean_ppl = np.mean(l), np.mean(p)
        pbar.set_description("loss:{:.4f} ppl:{:.1f}".format(mean_loss, mean_ppl))
        torch.cuda.empty_cache()

    # One validation pass per epoch.
    loss, ppl_val, ent_b, bleu_score_b = evaluate(
        model,
        data_loader_val,
        model_name=config.model,
        ty="valid",
        verbose=True,
    )
    if ppl_val <= best_ppl:
        # New best checkpoint: remember it and reset the patience counter.
        best_ppl = ppl_val
        cnt = 0
        model.save_model(best_ppl, e, 0, 0, bleu_score_b, ent_b)
    else:
        cnt += 1
    if cnt > 10:  # patience exhausted
        break
 



# --- Code example #2 ---
# 0
            model = model.eval()
            model.epoch = n_iter
            model.__id__logger = 0
            loss_val, ppl_val, bce_val, acc_val, bleu_score_g, bleu_score_b = evaluate(
                model, data_loader_val, ty="valid", max_dec_step=50)
            writer.add_scalars('loss', {'loss_valid': loss_val}, n_iter)
            writer.add_scalars('ppl', {'ppl_valid': ppl_val}, n_iter)
            writer.add_scalars('bce', {'bce_valid': bce_val}, n_iter)
            writer.add_scalars('accuracy', {'acc_train': acc_val}, n_iter)
            model = model.train()
            if (config.model == "experts" and n_iter < 13000):
                continue
            if (ppl_val <= best_ppl):
                best_ppl = ppl_val
                patient = 0
                model.save_model(best_ppl, n_iter, 0, 0, bleu_score_g,
                                 bleu_score_b)
                weights_best = deepcopy(model.state_dict())
            else:
                patient += 1
            if (patient > 2): break
except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')

## TESTING
model.load_state_dict({name: weights_best[name] for name in weights_best})
model.eval()
model.epoch = 100
loss_test, ppl_test, bce_test, acc_test, bleu_score_g, bleu_score_b = evaluate(
    model, data_loader_tst, ty="test", max_dec_step=50)
# --- Code example #3 ---
# 0
# File: MAML.py  Project: cstghitpku/PAML
            # Baseline: evaluate before task adaptation; store perplexity
            # (exp of the loss) for the "before" curve.
            loss, ppl = do_evaluation(meta_net, val_iter)
            val_loss_before.append(math.exp(loss))
            # Meta tuning: adapt on this task for a fixed number of steps.
            val_loss, val_ppl = do_learning_fix_step(
                meta_net,
                train_iter,
                val_iter,
                iterations=config.meta_iteration)
            val_loss_meta.append(math.exp(val_loss.item()))
            # Restore the original (pre-adaptation) weights so the next
            # task starts from the same meta-parameters.

            meta_net.load_state_dict(
                {name: weights_original[name]
                 for name in weights_original})

        # Log mean pre- and post-tuning perplexity for this meta-iteration.
        writer.add_scalars('loss_before',
                           {'val_loss_before': np.mean(val_loss_before)},
                           meta_iteration)
        writer.add_scalars('loss_meta',
                           {'val_loss_meta': np.mean(val_loss_meta)},
                           meta_iteration)
        # Early stopping on the mean post-tuning loss.
        if np.mean(val_loss_meta) < best_loss:
            best_loss = np.mean(val_loss_meta)
            stop_count = 0
            # NOTE(review): positional args (1, 0.0, 0.0, 0.0, 1.1) look like
            # placeholder metrics for save_model — confirm their meaning.
            meta_net.save_model(best_loss, 1, 0.0, 0.0, 0.0, 1.1)
        else:
            stop_count += 1
            if stop_count > patience:
                break
# --- Code example #4 ---
# 0
    # NOTE(review): tail of a guard clause whose condition is above this
    # excerpt; aborts the script before the model is constructed.
    exit(0)

# Build the Transformer over the preprocessed vocabulary and report size.
model = Transformer(p.vocab)
print("MODEL USED", config.model)
print("TRAINABLE PARAMETERS", count_parameters(model))

# Early-stopping state: best validation perplexity so far, and how many
# consecutive epochs have gone by without improving it.
best_ppl = 1000
cnt = 0

for e in range(config.epochs):
    print("Epoch", e)
    # NOTE(review): `p` is rebound here from the preprocessor object (used
    # above as p.vocab) to a plain list of per-batch perplexities — confirm
    # nothing after this loop still expects the preprocessor under `p`.
    p, l = [], []
    pbar = tqdm(enumerate(data_loader_tr), total=len(data_loader_tr))
    for i, d in pbar:
        loss, ppl, _ = model.train_one_batch(d)
        l.append(loss)
        p.append(ppl)
        # The progress bar shows running means over the epoch so far.
        mean_loss, mean_ppl = np.mean(l), np.mean(p)
        pbar.set_description("loss:{:.4f} ppl:{:.1f}".format(
            mean_loss, mean_ppl))

    # One validation pass per epoch.
    loss, ppl_val, ent_b, bleu_score_b = evaluate(
        model, data_loader_val, model_name=config.model, ty="valid")
    if ppl_val <= best_ppl:
        # New best checkpoint: save it and reset the patience counter.
        best_ppl = ppl_val
        cnt = 0
        model.save_model(best_ppl, e, 0, 0, 0, ent_b)
    else:
        cnt += 1
    if cnt > 10:  # patience exhausted
        break
# --- Code example #5 ---
# 0
# File: main.py  Project: Ravikiran2402/_MOEL
 # Every check_iter training steps: validate, log to TensorBoard and a
 # plain-text log, and checkpoint the model.
 if((n_iter+1)%check_iter==0):
     model = model.eval()
     model.epoch = n_iter
     model.__id__logger = 0
     # Validation pass; decoding is capped at max_dec_step=50.
     loss1, loss2, loss_val, ppl_val, bce_val, acc_val, bleu_score_g, bleu_score_b= evaluate(model, data_loader_val ,ty="valid", max_dec_step=50)
     # Log every validation metric under its own TensorBoard tag.
     writer.add_scalars('loss', {'loss_valid': loss_val}, n_iter)
     writer.add_scalars('ppl', {'ppl_valid': ppl_val}, n_iter)
     writer.add_scalars('bce', {'bce_valid': bce_val}, n_iter)
     writer.add_scalars('accuracy', {'acc_valid': acc_val}, n_iter)
     writer.add_scalars('loss1', {'loss1_valid': loss1}, n_iter)
     writer.add_scalars('loss2', {'loss2_valid': loss2}, n_iter)
     model = model.train()

     # Checkpoint unconditionally at every check interval (no best-ppl
     # gating here; early-stop bookkeeping follows below).
     model.save_model(ppl_val,n_iter,0 ,0,bleu_score_g,bleu_score_b)
     # Append all metrics as one space-separated line to log.txt.
     # NOTE(review): consider `with open(...)` so the file is closed even
     # if a write raises.
     f=open("log.txt","a")
     f.write(str(n_iter)+" ")
     f.write(str(loss_val)+" ")
     f.write(str(ppl_val)+" ")
     f.write(str(bce_val)+" ")
     f.write(str(acc_val)+" ")
     f.write(str(bleu_score_g)+" ")
     f.write(str(bleu_score_b)+" ")
     f.write(str(loss1)+" ")
     f.write(str(loss2)+"\n")
     f.close()

     # The "experts" model skips early stopping before 13000 iterations.
     if (config.model == "experts" and n_iter<13000):
         continue
     # Early-stopping update on validation perplexity (body continues
     # past this excerpt).
     if(ppl_val <= best_ppl):