def log_training_results(trainer):
    # Evaluate on the training set at the end of an epoch and log the metrics.
    # `evaluator`, `train_loader`, `experiment` and `optimizer` are expected to
    # be defined in the enclosing scope.
    evaluator.run(train_loader)
    metrics = evaluator.state.metrics
    timestamp = get_readable_time()
    print(timestamp + " Training set Results - Epoch: {}  Avg mae: {:.2f} Avg mse: {:.2f} Avg loss: {:.2f}"
          .format(trainer.state.epoch, metrics['mae'], metrics['mse'], metrics['loss']))
    experiment.log_metric("epoch", trainer.state.epoch)
    experiment.log_metric("train_mae", metrics['mae'])
    experiment.log_metric("train_mse", metrics['mse'])
    experiment.log_metric("train_loss", metrics['loss'])
    experiment.log_metric("lr", get_lr(optimizer))
Example #2
def log_training_results(trainer):
    experiment.log_metric("epoch", trainer.state.epoch)
    if not args.skip_train_eval:
        # Optionally evaluate on the training set; `evaluator_train` does not
        # compute a loss metric here, so only mae/mse are reported.
        evaluator_train.run(train_loader_eval)
        metrics = evaluator_train.state.metrics
        timestamp = get_readable_time()
        print(
            timestamp +
            " Training set Results - Epoch: {}  Avg mae: {:.2f} Avg mse: {:.2f}"
            .format(trainer.state.epoch, metrics['mae'], metrics['mse'])
        )
        experiment.log_metric("train_mae", metrics['mae'])
        experiment.log_metric("train_mse", metrics['mse'])
        experiment.log_metric("lr", get_lr(optimizer))

    # Report per-batch and per-epoch timing collected by the timers.
    print("batch_timer ", batch_timer.value())
    print("train_timer ", train_timer.value())
    experiment.log_metric("batch_timer", batch_timer.value())
    experiment.log_metric("train_timer", train_timer.value())