Code Example #1
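This example is the training driver for a gender-classification model: it sets up an Adam optimizer with a StepLR schedule that decays the learning rate by a factor of 0.7 each epoch, rebuilds the train and validation DataLoaders every epoch with a custom collate function, logs per-sample loss and accuracy for both splits, checkpoints the model to gender-<epoch>.pth after every epoch, and finally plots the training-accuracy curve.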
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)  # Adam optimizer, lr=0.01
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=1,
                                                gamma=0.7)
    len_train = len(train_ds)
    len_val = len(val_ds)
    loss_plt = []
    for epoch in range(N_EPOCHS):
        train_dl = DataLoader(train_ds,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              collate_fn=train_ds.generate_batch)
        val_dl = DataLoader(val_ds,
                            batch_size=BATCH_SIZE,
                            collate_fn=val_ds.generate_batch)
        train_loss, train_acc = train_epoc(train_dl, model, criterion,
                                           optimizer, scheduler)
        valid_loss, valid_acc = val_epoc(val_dl, model, criterion)
        print("epoch", epoch)
        print(
            f'\tLoss: {train_loss/len_train:.8f}(train)\t|\tAcc: {train_acc/len_train * 100:.3f}%(train)'
        )
        print(
            f'\tLoss: {valid_loss/len_val:.8f}(valid)\t|\tAcc: {valid_acc/len_val * 100:.3f}%(valid)'
        )
        loss_plt.append(train_acc / len_train)  # note: despite the name, this records training accuracy
        torch.save(model.state_dict(), "gender-" + str(epoch) + ".pth")
    print(loss_plt)
    plt.plot(range(len(loss_plt)), loss_plt)
    plt.show()
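Neither train_epoc nor val_epoc is shown in these examples. Below is a minimal sketch of what the calling code assumes they do: sum the loss and correct-prediction counts over one epoch (the caller divides both by the dataset length) and step the scheduler once per epoch. The (text, labels) batch layout is an assumption about what generate_batch yields, and torch is assumed to be imported as in the surrounding code.

    def train_epoc(dataloader, model, criterion, optimizer, scheduler):
        # Hypothetical sketch; the real functions live elsewhere in the project.
        # Returns summed loss and summed correct predictions for the epoch.
        model.train()
        total_loss, total_correct = 0.0, 0
        for text, labels in dataloader:
            optimizer.zero_grad()
            output = model(text)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            total_correct += (output.argmax(1) == labels).sum().item()
        scheduler.step()  # StepLR(step_size=1) decays the learning rate every epoch
        return total_loss, total_correct

    def val_epoc(dataloader, model, criterion):
        # Same accumulation as above, but with gradients disabled.
        model.eval()
        total_loss, total_correct = 0.0, 0
        with torch.no_grad():
            for text, labels in dataloader:
                output = model(text)
                total_loss += criterion(output, labels).item()
                total_correct += (output.argmax(1) == labels).sum().item()
        return total_loss, total_correct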
Code Example #2
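This example is the same training driver specialized for age classification, and it additionally shows the setup that precedes the epoch loop: the validation rows are dropped from the training frame, both frames are re-indexed, seq_dataset instances are built over the "age" column, and the baseline model, loss, optimizer, and scheduler are constructed. Checkpoints are written to age-<epoch>.pth.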
    train_df = train_df.drop(val_df.index)  # remove validation rows from the training frame
    train_df.reset_index(drop=True, inplace=True)
    val_df.reset_index(drop=True, inplace=True)
    train_ds = dataset.seq_dataset(train_df, SEQ_LEN, "age")
    val_ds = dataset.seq_dataset(val_df, SEQ_LEN, "age")
    # Note: this rebinds the name `model` from the module to the instance.
    model = model.baseline_model(VOCAB_SIZE, EMBED_DIM, NUN_CLASS, MODE).to(device)
    criterion = torch.nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)  # Adam optimizer, lr=0.01
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.7)
    len_train = len(train_ds)
    len_val = len(val_ds)
    loss_plt = []
    for epoch in range(N_EPOCHS):
        train_dl = DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True,
                              collate_fn=train_ds.generate_batch)
        val_dl = DataLoader(val_ds, batch_size=BATCH_SIZE,
                            collate_fn=val_ds.generate_batch)
        train_loss, train_acc = train_epoc(train_dl, model, criterion, optimizer, scheduler)
        valid_loss, valid_acc = val_epoc(val_dl, model, criterion)
        print("epoch", epoch)
        print(f'\tLoss: {train_loss/len_train:.8f}(train)\t|\tAcc: {train_acc/len_train * 100:.3f}%(train)')
        print(f'\tLoss: {valid_loss/len_val:.8f}(valid)\t|\tAcc: {valid_acc/len_val * 100:.3f}%(valid)')
        loss_plt.append(train_acc / len_train)  # note: despite the name, this records training accuracy
        torch.save(model.state_dict(), "age-" + str(epoch) + ".pth")
    print(loss_plt)
    plt.plot(range(len(loss_plt)), loss_plt)
    plt.show()
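Both DataLoaders pass collate_fn=<dataset>.generate_batch, which is defined in the project's dataset module and not shown here. Assuming the dataset yields (token-id sequence, label) pairs and that SEQ_LEN is the module-level constant used above, a minimal sketch that pads or truncates each sequence to a fixed length could look like this:

    def generate_batch(batch):
        # Hypothetical sketch of the custom collate function; the real
        # seq_dataset.generate_batch may differ. Pads/truncates each token-id
        # sequence to SEQ_LEN and stacks the labels into a tensor.
        texts, labels = zip(*batch)
        padded = torch.zeros(len(texts), SEQ_LEN, dtype=torch.long)
        for i, tokens in enumerate(texts):
            length = min(len(tokens), SEQ_LEN)
            padded[i, :length] = torch.as_tensor(tokens[:length], dtype=torch.long)
        return padded, torch.as_tensor(labels, dtype=torch.long)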



Code Example #3
File: baseline_main.py  Project: NaiveXu/Master
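This excerpt from the project's training loop records accuracy and loss per episode, runs a validation pass every 20 epochs, saves a resumable checkpoint after each epoch, and then prompts the user for how many additional epochs to train.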
            #print("Time after: ", datetime.datetime.now())
            total_accuracy.append(accuracy)
            total_loss.append(loss)

            if epoch % 20 == 0:
                print("\n\n--- Test epoch " + str(epoch) + " ---\n\n")
                validate.validate(model, epoch, optimizer, test_loader, args,
                                  logger, test_acc_dict, episode, criterion)

            #memory.flush()

            ### SAVING CHECKPOINT ###
            save_checkpoint({
                'epoch': epoch + 1,
                'episode': episode,
                'state_dict': model.state_dict(),
                'accuracy': acc_dict,
                'tot_accuracy': total_accuracy,
                'tot_loss': total_loss,
            })

        elapsed_time = time.time() - start_time
        print("ELAPSED TIME = " + str(elapsed_time) + " seconds")
        answer = input("How many more epochs to train: ")
        try:
            if int(answer) == 0:
                done = True
            else:
                args.start_epoch = args.epochs + 1
                args.epochs += int(answer)
        except ValueError:
            # int(answer) raises ValueError on non-numeric input; the handler
            # body is truncated in this excerpt.
            pass
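The save_checkpoint helper is defined outside this excerpt. A minimal sketch, assuming it simply serializes the passed state dict with torch.save (the filename default here is hypothetical):

    def save_checkpoint(state, filename="checkpoint.pth.tar"):
        # Hypothetical sketch; the real helper in baseline_main.py may do more,
        # e.g. track the best-accuracy model. Persists the full training state
        # (epoch, episode, weights, metrics) so a run can be resumed.
        torch.save(state, filename)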