        eval_model_fn, loss_fn, optimizer, dataloader, batch_preprocessing_fn, log_interval=1)
    test_loss[epoch - 1] = process_epoch('test', epoch, eval_model_fn, loss_fn, optimizer,
        dataloader_test, batch_preprocessing_fn, log_interval=1)

    # remember best loss
    is_best = test_loss[epoch - 1] < best_test_loss
    best_test_loss = min(test_loss[epoch - 1], best_test_loss)
    save_checkpoint({
        'epoch': epoch,
        'args': args,
        'state_dict': model.state_dict(),
        'best_test_loss': best_test_loss,
        'optimizer': optimizer.state_dict(),
        'train_loss': train_loss,
        'test_loss': test_loss,
    }, is_best, checkpoint_name)

print('Done!')
    )
    test_loss[epoch - 1] = process_epoch(
        "test",
        epoch,
        model,
        loss_fn,
        optimizer,
        dataloader_test,
        batch_preprocessing_fn=None,
        log_interval=1,
    )

    # remember best loss
    is_best = test_loss[epoch - 1] < best_test_loss
    best_test_loss = min(test_loss[epoch - 1], best_test_loss)
    save_checkpoint(
        {
            "epoch": epoch,
            "args": args,
            "state_dict": model.state_dict(),
            "best_test_loss": best_test_loss,
            "optimizer": optimizer.state_dict(),
            "train_loss": train_loss,
            "test_loss": test_loss,
        },
        is_best,
        checkpoint_name,
    )

print("Done!")
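
# A minimal sketch of a save_checkpoint helper compatible with the calls above.
# This is an assumed illustration of the common torch.save + copy-on-best pattern,
# not necessarily this repository's actual implementation; the 'best_' filename
# prefix is a hypothetical convention chosen for the example.
import os
import shutil

import torch


def save_checkpoint(state, is_best, checkpoint_path):
    """Write the latest checkpoint and keep a copy whenever the test loss improves."""
    # Save the full training state (model weights, optimizer state, loss history, ...).
    torch.save(state, checkpoint_path)
    if is_best:
        # Copy the current checkpoint so the best-scoring model is not
        # overwritten by later epochs.
        directory, filename = os.path.split(checkpoint_path)
        shutil.copyfile(checkpoint_path, os.path.join(directory, 'best_' + filename))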