# --- per-epoch tail of the training loop (fragment; assumes an enclosing
# `for epoch in ...:` and that `input_variable`/`training_pair` were set above).
target_variable = training_pair[1]
loss = train(input_variable, target_variable, encoder, decoder,
             encoder_optimizer, decoder_optimizer, criterion, device)

# Accumulate loss for periodic console reporting and for plotting.
print_loss_total += loss
plot_loss_total += loss

if epoch % print_every == 0:
    # Average loss since the last report, then reset the accumulator.
    print_loss_avg = print_loss_total / print_every
    print_loss_total = 0
    time_since = helpers.time_since(start, epoch / n_epochs)
    print('%s (%d %d%%) %.4f' % (time_since, epoch,
                                 epoch / n_epochs * 100, print_loss_avg))

if epoch % 100 == 0:
    # FIX: makedirs(..., exist_ok=True) replaces the original
    # exists()-then-makedirs pair, which raced if the directory was
    # created between the check and the call.
    checkpoint_dir = "checkpoint/"
    os.makedirs(checkpoint_dir, exist_ok=True)
    model_out_path = os.path.join(checkpoint_dir,
                                  "params_epoch_{}.tar".format(epoch))
    print("세이브 시작")
    # Bundle everything needed to resume training into one checkpoint.
    torch.save(
        {
            'epoch': epoch,
            'encoder': encoder.state_dict(),
            'decoder': decoder.state_dict(),
            'encoder_optim': encoder_optimizer.state_dict(),
            'decoder_optim': decoder_optimizer.state_dict(),
            'decoder.attention': decoder.attention.state_dict()
        },
        model_out_path)
    print("세이브 끝")

# NOTE(review): plotting here runs every epoch — presumably intended to sit
# after the loop; confirm against the enclosing code.
helpers.show_plot(plot_losses)
# Keep track of loss print_loss_total += loss plot_loss_total += loss if epoch == 0: continue if epoch % print_every == 0: print_loss_avg = print_loss_total / print_every print_loss_total = 0 time_since = helpers.time_since(start, epoch / n_epochs) print('%s (%d %d%%) %.4f' % (time_since, epoch, epoch / n_epochs * 100, print_loss_avg)) if epoch % plot_every == 0: plot_loss_avg = plot_loss_total / plot_every plot_losses.append(plot_loss_avg) plot_loss_total = 0 # Save our models torch.save(encoder.state_dict(), '../data/encoder_params_{}'.format(args.language)) torch.save(decoder.state_dict(), '../data/decoder_params_{}'.format(args.language)) torch.save(decoder.attention.state_dict(), '../data/attention_params_{}'.format(args.language)) # Plot loss helpers.show_plot(plot_losses)
print_loss_total += loss plot_loss_total += loss if epoch == 0: continue if epoch % print_every == 0: print_loss_avg = print_loss_total / print_every print_loss_total = 0 time_since = helpers.time_since(start, epoch / n_epochs) print('%s (%d %d%%) %.4f' % (time_since, epoch, epoch / n_epochs * 100, print_loss_avg)) if (epoch / n_epochs * 100) % 5 == 0 and epoch > 100 : # if epoch == 30: torch.save(encoder.state_dict(), './data/encoder_params_{}'.format(language)) torch.save(decoder.state_dict(), './data/decoder_params_{}'.format(language)) torch.save(decoder.attention.state_dict(), './data/attention_params_{}'.format(language)) exec(open("eval.py").read()) if epoch % plot_every == 0: plot_loss_avg = plot_loss_total / plot_every plot_losses.append(plot_loss_avg) plot_loss_total = 0 # Save models torch.save(encoder.state_dict(), './data/encoder_params_{}'.format(language)) torch.save(decoder.state_dict(), './data/decoder_params_{}'.format(language)) torch.save(decoder.attention.state_dict(), './data/attention_params_{}'.format(language))