def log_validation_results(trainer):
    evaluate_validate_timer.resume()
    evaluator_validate.run(val_loader)
    evaluate_validate_timer.pause()
    evaluate_validate_timer.step()
    metrics = evaluator_validate.state.metrics
    timestamp = get_readable_time()
    print(timestamp +
          " Validation set Results - Epoch: {} Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
          .format(trainer.state.epoch, metrics['mae'], metrics['mse'], 0))
    experiment.log_metric("valid_mae", metrics['mae'])
    experiment.log_metric("valid_mse", metrics['mse'])
    # timer
    experiment.log_metric("evaluate_valid_timer", evaluate_validate_timer.value())
    print("evaluate_valid_timer ", evaluate_validate_timer.value())
    # check whether this validation result is the best seen so far
    flag_mae = best_mae.checkAndRecord(metrics['mae'], metrics['mse'])
    flag_mse = best_mse.checkAndRecord(metrics['mae'], metrics['mse'])
    if flag_mae or flag_mse:
        experiment.log_metric("valid_best_mae", metrics['mae'])
        experiment.log_metric("valid_best_mse", metrics['mse'])
        experiment.log_metric("valid_best_epoch", trainer.state.epoch)
        print("BEST VAL, evaluating on test set")
        evaluate_test_timer.resume()
        evaluator_test.run(test_loader)
        evaluate_test_timer.pause()
        evaluate_test_timer.step()
        test_metrics = evaluator_test.state.metrics
        timestamp = get_readable_time()
        print(timestamp +
              " Test set Results - Epoch: {} Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
              .format(trainer.state.epoch, test_metrics['mae'], test_metrics['mse'], 0))
        experiment.log_metric("test_mae", test_metrics['mae'])
        experiment.log_metric("test_mse", test_metrics['mse'])
        experiment.log_metric("evaluate_test_timer", evaluate_test_timer.value())
        print("evaluate_test_timer ", evaluate_test_timer.value())
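# `best_mae` and `best_mse` are not defined in this excerpt; only their
# `checkAndRecord(mae, mse) -> bool` interface is visible above. A minimal
# sketch of a tracker with that interface (a hypothetical reconstruction,
# not necessarily the author's implementation):
class BestMetricTracker:
    """Records the best (lowest) value of one of the two metrics passed in."""

    def __init__(self, track_index=0):
        self.track_index = track_index  # 0 tracks MAE, 1 tracks MSE
        self.best = (float('inf'), float('inf'))

    def checkAndRecord(self, mae, mse):
        # Returns True and records both values when the tracked metric improves.
        values = (mae, mse)
        if values[self.track_index] < self.best[self.track_index]:
            self.best = values
            return True
        return False

best_mae = BestMetricTracker(track_index=0)
best_mse = BestMetricTracker(track_index=1)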
def log_validation_results(trainer):
    evaluator.run(val_loader)
    metrics = evaluator.state.metrics
    timestamp = get_readable_time()
    print(timestamp +
          " Validation set Results - Epoch: {} Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
          .format(trainer.state.epoch, metrics['mae'], metrics['mse'], metrics['nll']))
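# The `evaluator` object is not constructed in this excerpt. A minimal sketch
# with PyTorch Ignite, assuming the metric keys 'mae', 'mse', and 'nll' read
# above (the loss function passed to Loss is an assumption):
import torch.nn.functional as F
from ignite.engine import create_supervised_evaluator
from ignite.metrics import Loss, MeanAbsoluteError, MeanSquaredError

evaluator = create_supervised_evaluator(
    model,
    metrics={
        'mae': MeanAbsoluteError(),
        'mse': MeanSquaredError(),
        'nll': Loss(F.mse_loss),  # assumed criterion; the source only shows the key 'nll'
    },
)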
def log_validation_results(trainer):
    # note: this variant runs the evaluator on test_loader but prints and logs
    # the results under validation names
    evaluator.run(test_loader)
    metrics = evaluator.state.metrics
    timestamp = get_readable_time()
    print(timestamp +
          " Validation set Results - Epoch: {} Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
          .format(trainer.state.epoch, metrics['mae'], metrics['mse'], metrics['loss']))
    experiment.log_metric("valid_mae", metrics['mae'])
    experiment.log_metric("valid_mse", metrics['mse'])
    experiment.log_metric("valid_loss", metrics['loss'])
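# `get_readable_time` is called throughout but not defined in this excerpt.
# A plausible one-liner (hypothetical; the exact format string is an assumption):
from datetime import datetime

def get_readable_time():
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")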
def log_training_results(trainer):
    evaluator.run(train_loader)
    metrics = evaluator.state.metrics
    timestamp = get_readable_time()
    print(timestamp +
          " Training set Results - Epoch: {} Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
          .format(trainer.state.epoch, metrics['mae'], metrics['mse'], metrics['loss']))
    experiment.log_metric("epoch", trainer.state.epoch)
    experiment.log_metric("train_mae", metrics['mae'])
    experiment.log_metric("train_mse", metrics['mse'])
    experiment.log_metric("train_loss", metrics['loss'])
    experiment.log_metric("lr", get_lr(optimizer))
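# `get_lr` is not defined in this excerpt. The standard way to read the current
# learning rate from a PyTorch optimizer (a sketch, assuming a single param group):
def get_lr(optimizer):
    return optimizer.param_groups[0]['lr']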
def log_training_results(trainer):
    experiment.log_metric("epoch", trainer.state.epoch)
    if not args.skip_train_eval:
        evaluator_train.run(train_loader_eval)
        metrics = evaluator_train.state.metrics
        timestamp = get_readable_time()
        print(timestamp +
              " Training set Results - Epoch: {} Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
              .format(trainer.state.epoch, metrics['mae'], metrics['mse'], 0))
        experiment.log_metric("train_mae", metrics['mae'])
        experiment.log_metric("train_mse", metrics['mse'])
    experiment.log_metric("lr", get_lr(optimizer))
    print("batch_timer ", batch_timer.value())
    print("train_timer ", train_timer.value())
    experiment.log_metric("batch_timer", batch_timer.value())
    experiment.log_metric("train_timer", train_timer.value())
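# The timers used above (batch_timer, train_timer, evaluate_*_timer) match the
# resume()/pause()/step()/value() interface of ignite.handlers.Timer. A sketch
# of how they might be set up; the event choices for attach() are assumptions:
from ignite.engine import Events
from ignite.handlers import Timer

# Average time per batch, driven automatically by trainer events.
batch_timer = Timer(average=True)
batch_timer.attach(trainer,
                   start=Events.EPOCH_STARTED,
                   resume=Events.ITERATION_STARTED,
                   pause=Events.ITERATION_COMPLETED,
                   step=Events.ITERATION_COMPLETED)

# Timers driven manually via resume()/pause()/step(), as in the handlers above.
train_timer = Timer(average=True)
evaluate_validate_timer = Timer(average=True)
evaluate_test_timer = Timer(average=True)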
def log_training_loss(trainer):
    timestamp = get_readable_time()
    print(timestamp + " Epoch[{}] Loss: {:.2f}".format(trainer.state.epoch, trainer.state.output))
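# The handlers above are presumably attached to trainer events; their first
# argument receives the engine. A minimal sketch of the standard Ignite
# registration (the event choices are assumptions):
from ignite.engine import Events

trainer.add_event_handler(Events.ITERATION_COMPLETED, log_training_loss)
trainer.add_event_handler(Events.EPOCH_COMPLETED, log_training_results)
trainer.add_event_handler(Events.EPOCH_COMPLETED, log_validation_results)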
        # experiment.log_metric("test_loss", test_metrics['loss'])

def checkpoint_valid_mae_score_function(engine):
    # score functions are maximized, so negate MAE (lower MAE is better)
    score = engine.state.metrics['mae']
    return -score

if args.eval_only:
    print("evaluation only, no training")
    timestamp = get_readable_time()
    # if flag_mae or flag_mse:
    #     experiment.log_metric("valid_best_mae", metrics['mae'])
    #     experiment.log_metric("valid_best_mse", metrics['mse'])
    #     print("BEST VAL, evaluating on test set")
    evaluate_test_timer.resume()
    evaluator_test.run(test_loader)
    evaluate_test_timer.pause()
    evaluate_test_timer.step()
    test_metrics = evaluator_test.state.metrics
    timestamp = get_readable_time()
    if args.eval_density:
        print(timestamp +
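# `checkpoint_valid_mae_score_function` is presumably passed to an Ignite
# checkpoint handler, which keeps the checkpoints with the *highest* score
# (hence the negated MAE). A sketch of the standard wiring; the directory name,
# filename prefix, and n_saved are assumptions:
from ignite.engine import Events
from ignite.handlers import ModelCheckpoint

checkpoint_handler = ModelCheckpoint(
    dirname='checkpoints',
    filename_prefix='best',
    score_function=checkpoint_valid_mae_score_function,
    score_name='valid_mae',
    n_saved=1,
)
evaluator_validate.add_event_handler(Events.COMPLETED, checkpoint_handler, {'model': model})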