Example #1

Both examples implement the same logger-integration test: run a one-batch fit and test with each supported logger, record every call to log_metrics, and assert the exact sequence of (step, metric-name) pairs that was logged.
# Assumed imports (paths as in the PyTorch Lightning test suite of this era;
# _get_logger_args and mock_mlflow_run_creation are helpers from the same test
# module, sketched after the examples):
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import (
    CometLogger,
    MLFlowLogger,
    TensorBoardLogger,
    TestTubeLogger,
    WandbLogger,
)
from tests.base import EvalModelTemplate


def _test_loggers_fit_test(tmpdir, logger_class):
    model = EvalModelTemplate()

    class StoreHistoryLogger(logger_class):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.history = []

        def log_metrics(self, metrics, step):
            super().log_metrics(metrics, step)
            self.history.append((step, metrics))

    logger_args = _get_logger_args(logger_class, tmpdir)
    logger = StoreHistoryLogger(**logger_args)

    if logger_class == WandbLogger:
        # required mocks for Trainer
        logger.experiment.id = 'foo'
        logger.experiment.project_name.return_value = 'bar'

    if logger_class == CometLogger:
        logger.experiment.id = 'foo'
        logger.experiment.project_name = 'bar'

    if logger_class == TestTubeLogger:
        logger.experiment.version = 'foo'
        logger.experiment.name = 'bar'

    if logger_class == MLFlowLogger:
        logger = mock_mlflow_run_creation(logger, experiment_id="foo", run_id="bar")

    trainer = Trainer(
        max_epochs=1,
        logger=logger,
        limit_train_batches=1,
        limit_val_batches=1,
        log_every_n_steps=1,
        default_root_dir=tmpdir,
    )
    trainer.fit(model)
    trainer.test()

    log_metric_names = [(s, sorted(m.keys())) for s, m in logger.history]
    if logger_class == TensorBoardLogger:
        # TensorBoardLogger additionally records an 'hp_metric' entry each time
        # hyperparameters are logged, hence the two extra rows below.
        expected = [
            (0, ['hp_metric']),
            (0, ['epoch', 'train_some_val']),
            (0, ['early_stop_on', 'epoch', 'val_acc']),
            (0, ['hp_metric']),
            (1, ['epoch', 'test_acc', 'test_loss'])
        ]
        assert log_metric_names == expected
    else:
        expected = [
            (0, ['epoch', 'train_some_val']),
            (0, ['early_stop_on', 'epoch', 'val_acc']),
            (1, ['epoch', 'test_acc', 'test_loss'])
        ]
        assert log_metric_names == expected
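
Both examples call a helper _get_logger_args that is defined elsewhere in the test module. A minimal sketch of what it must do, assuming it merely builds the constructor kwargs a given logger class accepts (the real helper may differ):

import inspect


def _get_logger_args(logger_class, save_dir):
    # Pass only the constructor arguments this logger class actually accepts.
    accepted = inspect.getfullargspec(logger_class.__init__).args
    logger_args = {}
    if "save_dir" in accepted:
        logger_args["save_dir"] = str(save_dir)
    if "offline" in accepted:
        # Keep network-backed loggers (e.g. WandbLogger) offline during tests.
        logger_args["offline"] = True
    return logger_args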
Example #2

The same test rewritten to use a self-contained CustomModel built on BoringModel instead of EvalModelTemplate; the expected metric names change accordingly (val_loss rather than val_acc, and no test_acc).
# Assumed imports (same conventions as Example #1; BoringModel replaces
# EvalModelTemplate in newer versions of the test suite):
import torch

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import (
    CometLogger,
    MLFlowLogger,
    TensorBoardLogger,
    TestTubeLogger,
    WandbLogger,
)
from tests.helpers import BoringModel


def _test_loggers_fit_test(tmpdir, logger_class):
    class CustomModel(BoringModel):
        def training_step(self, batch, batch_idx):
            output = self.layer(batch)
            loss = self.loss(batch, output)
            self.log("train_some_val", loss)
            return {"loss": loss}

        def validation_epoch_end(self, outputs) -> None:
            avg_val_loss = torch.stack([x["x"] for x in outputs]).mean()
            self.log_dict({
                "early_stop_on": avg_val_loss,
                "val_loss": avg_val_loss**0.5
            })

        def test_epoch_end(self, outputs) -> None:
            avg_test_loss = torch.stack([x["y"] for x in outputs]).mean()
            self.log("test_loss", avg_test_loss)

    class StoreHistoryLogger(logger_class):
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs)
            self.history = []

        def log_metrics(self, metrics, step):
            super().log_metrics(metrics, step)
            self.history.append((step, metrics))

    logger_args = _get_logger_args(logger_class, tmpdir)
    logger = StoreHistoryLogger(**logger_args)

    if logger_class == WandbLogger:
        # required mocks for Trainer
        logger.experiment.id = "foo"
        logger.experiment.project_name.return_value = "bar"

    if logger_class == CometLogger:
        logger.experiment.id = "foo"
        logger.experiment.project_name = "bar"

    if logger_class == TestTubeLogger:
        logger.experiment.version = "foo"
        logger.experiment.name = "bar"

    if logger_class == MLFlowLogger:
        logger = mock_mlflow_run_creation(logger,
                                          experiment_id="foo",
                                          run_id="bar")

    model = CustomModel()
    trainer = Trainer(
        max_epochs=1,
        logger=logger,
        limit_train_batches=1,
        limit_val_batches=1,
        log_every_n_steps=1,
        default_root_dir=tmpdir,
    )
    trainer.fit(model)
    trainer.test()

    log_metric_names = [(s, sorted(m.keys())) for s, m in logger.history]
    if logger_class == TensorBoardLogger:
        # As in Example #1, TensorBoardLogger adds an "hp_metric" row on each
        # hyperparameter log call.
        expected = [
            (0, ["hp_metric"]),
            (0, ["epoch", "train_some_val"]),
            (0, ["early_stop_on", "epoch", "val_loss"]),
            (0, ["hp_metric"]),
            (1, ["epoch", "test_loss"]),
        ]
        assert log_metric_names == expected
    else:
        expected = [
            (0, ["epoch", "train_some_val"]),
            (0, ["early_stop_on", "epoch", "val_loss"]),
            (1, ["epoch", "test_loss"]),
        ]
        assert log_metric_names == expected
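
mock_mlflow_run_creation is the other helper both examples assume. A minimal sketch, under the assumption that it stubs the logger's underlying MLflow client (the _mlflow_client attribute here is illustrative) so no tracking server is needed:

from unittest import mock


def mock_mlflow_run_creation(logger, experiment_id=None, run_id=None):
    # Replace MLflow client calls with in-memory mocks.
    run = mock.MagicMock()
    run.info.run_id = run_id
    logger._mlflow_client.create_experiment = mock.MagicMock(return_value=experiment_id)
    logger._mlflow_client.create_run = mock.MagicMock(return_value=run)
    return logger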
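
The leading underscore suggests _test_loggers_fit_test is invoked by a parametrized wrapper rather than collected directly. One plausible (hypothetical) driver; a real suite would also patch the third-party SDKs such as comet_ml and wandb so the loggers stay offline:

import pytest


@pytest.mark.parametrize(
    "logger_class",
    [CometLogger, MLFlowLogger, TensorBoardLogger, TestTubeLogger, WandbLogger],
)
def test_loggers_fit_test(tmpdir, logger_class):
    _test_loggers_fit_test(tmpdir, logger_class)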