Example #1
import time

import torch
import matplotlib.pyplot as plt

# The models, data loaders, optimizers, criterion, N_EPOCHS and the
# train/evaluate/epoch_time helpers are assumed to be defined earlier.
best_valid_loss = float('inf')  # best validation loss seen so far
plot_losses = []
for epoch in range(N_EPOCHS):

    start_time = time.time()

    train_loss = train(encoder, decoder, train_loader, encoder_optimizer, decoder_optimizer, criterion)
    valid_loss, right_ratio = evaluate(encoder, decoder, dev_loader, criterion)

    end_time = time.time()

    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # keep a checkpoint of the model that performs best on the dev set
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(encoder.state_dict(), 'encoder.pt')
        torch.save(decoder.state_dict(), 'decoder.pt')

    print(f'Epoch: {epoch + 1:02} | Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f}')
    print(f'\t Val. Loss: {valid_loss:.3f}')
    print(f'\t Val. acc: {right_ratio:.3f}')

    plot_losses.append([train_loss, valid_loss / len(dev_loader), right_ratio])  # log per-epoch metrics for plotting
train_curve = [i[0] for i in plot_losses]
valid_curve = [i[1] for i in plot_losses]
acc_curve = [i[2] * 100 for i in plot_losses]  # accuracy in percent
plt.plot(train_curve, '-', label='Training Loss')
plt.plot(valid_curve, ':', label='Validation Loss')
plt.plot(acc_curve, '.', label='Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Loss & Accuracy')
plt.legend()
plt.show()
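
The loop above also calls an epoch_time helper that is not shown in the snippet. A minimal sketch, assuming it only converts the elapsed wall-clock time into whole minutes and leftover seconds:

def epoch_time(start_time, end_time):
    # split an elapsed interval (in seconds) into minutes and seconds for logging
    elapsed = end_time - start_time
    elapsed_mins = int(elapsed / 60)
    elapsed_secs = int(elapsed - elapsed_mins * 60)
    return elapsed_mins, elapsed_secs
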
Example #2
            optimizer.step()
            running_loss += loss.item()
            print_loss_total += loss.item()
            total_loss += loss.item()
            # report the running average loss over the last 5000 iterations
            if (i + 1) % 5000 == 0:
                print('{}/{}, Loss:{:.6f}'.format(
                    i + 1, len(lang_dataloader), running_loss / 5000))
                running_loss = 0
            # record a point for the loss curve every 100 iterations
            if (i + 1) % 100 == 0:
                plot_loss = print_loss_total / 100
                plot_losses.append(plot_loss)
                print_loss_total = 0
        during = time.time() - since
        print('Finish {}/{}, Loss:{:.6f}, Time:{:.0f}s'.format(
            epoch + 1, total_epoch, total_loss / len(lang_dataset), during))
    show_plot(plot_losses)


if use_attn:
    train(encoder, attn_decoder, total_epoch, use_attn=True)
else:
    train(encoder, decoder, total_epoch, use_attn=False)

print('finish training!')
if use_attn:
    torch.save(encoder.state_dict(), './encoder.pth')
    torch.save(attn_decoder.state_dict(), './attn_decoder.pth')
else:
    torch.save(encoder.state_dict(), './encoder.pth')
    torch.save(decoder.state_dict(), './decoder.pth')
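
The train call above logs loss points into plot_losses and hands them to a show_plot helper that is not included in the snippet. A minimal sketch of such a helper, assuming it simply renders the curve with matplotlib:

import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

def show_plot(points):
    # plot the loss values collected every 100 iterations
    fig, ax = plt.subplots()
    # put a y-axis tick at regular intervals so the curve is easier to read
    ax.yaxis.set_major_locator(ticker.MultipleLocator(base=0.2))
    plt.plot(points)
    plt.show()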
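
Both examples persist only state_dicts, so restoring the weights later follows the usual load_state_dict pattern. A sketch, assuming encoder and attn_decoder have been re-instantiated with the same constructor arguments used for training (those arguments are not shown in the snippet):

import torch

# load the saved parameters back into freshly constructed modules
encoder.load_state_dict(torch.load('./encoder.pth'))
attn_decoder.load_state_dict(torch.load('./attn_decoder.pth'))

# switch to evaluation mode before decoding
encoder.eval()
attn_decoder.eval()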