Example #1
    def _test_fit_loader(device):

        seed_all(42)
        save_dir = tmpdir.mkdir("test_save")
        file_name = "test_classification.bin"
        dataset = torch.utils.data.TensorDataset(X, y)
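        # create_ds is a helper from the enclosing test module (presumably it
        # wraps the given tensors in a Dataset; see the sketch after this example).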
        test_dataset = create_ds((test_data, ))
        loader = torch.utils.data.DataLoader(dataset, batch_size=32)
        test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=16)

        metric_list = [
            Accuracy(num_classes=num_classes),
        ]

        callbacks = [
            cbs.EarlyStopping(monitor="val_accuracy", mode="max"),
            cbs.ModelCheckpoint(monitor="val_accuracy",
                                mode="max",
                                save_dir=save_dir,
                                file_name=file_name),
            cbs.CosineAnnealingWarmRestarts(T_0=2),
        ]

        exp = Experiment(
            num_epochs=10,
            fp16=fp16,
            device=device,
            seed=42,
        )

        exp.compile_experiment(
            model_config=config,
            metrics=metric_list,
            callbacks=callbacks,
            main_metric="accuracy",
        )

        assert isinstance(exp.state, State)
        exp.fit_loader(train_dl=loader, valid_dl=loader)
        logs = exp.get_logs()
        assert isinstance(logs, pd.DataFrame)
        outputs = []
        for op in exp.predict_on_loader(test_dl=test_dl,
                                        device=device,
                                        path_to_model=os.path.join(
                                            save_dir, file_name)):
            outputs.extend(op)

        assert len(outputs) == test_samples
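
These snippets are closures from a pytest module, so names like X, y, test_data, test_samples, num_features, num_classes, optimizer, optimizer_lr, criterion, fp16, config, and create_ds come from the enclosing scope. A minimal sketch of what those assumed fixtures could look like (every value below is hypothetical, not torchflare's actual test data):

    import torch

    # Hypothetical stand-ins for the closure variables used by the tests above.
    num_features, num_classes = 20, 4
    test_samples = 100
    X = torch.randn(256, num_features)                   # training inputs
    y = torch.randint(0, num_classes, (256,))            # integer class labels
    test_data = torch.randn(test_samples, num_features)  # inference inputs
    fp16 = False                                         # mixed-precision toggle
    optimizer, optimizer_lr = "Adam", 3e-4               # optimizer name + learning rate
    criterion = "cross_entropy"                          # loss name

    def create_ds(tensors):
        # Assumed behaviour of the helper: wrap a tuple of tensors in a TensorDataset.
        return torch.utils.data.TensorDataset(*tensors)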
Example #2
    def _test_dict_inputs(device):

        seed_all(42)
        exp_config = ModelConfig(
            nn_module={
                "model_A": Model,
                "model_B": Model
            },
            module_params={
                "model_A": {
                    "num_features": num_features,
                    "num_classes": num_classes
                },
                "model_B": {
                    "num_features": num_features,
                    "num_classes": num_classes
                },
            },
            optimizer={
                "model_A": optimizer,
                "model_B": optimizer
            },
            optimizer_params={
                "model_A": {
                    "lr": optimizer_lr
                },
                "model_B": {
                    "lr": optimizer_lr
                }
            },
            criterion=criterion,
        )

        exp = Experiment(
            num_epochs=10,
            fp16=fp16,
            device=device,
            seed=42,
        )
        exp.compile_experiment(
            model_config=exp_config,
            metrics=None,
            callbacks=None,
            main_metric="val_loss",
        )
        assert isinstance(exp.state, State)
        assert len(exp.state.model.keys()) == 2
        assert len(exp.state.optimizer.keys()) == 2
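
In this dict form, compile_experiment instantiates one module per key in nn_module using the matching kwargs from module_params, and builds one optimizer per key the same way, which is what the two assertions on exp.state.model and exp.state.optimizer check. The Model class itself is defined elsewhere in the test module; a plausible minimal stand-in:

    import torch

    class Model(torch.nn.Module):
        """Hypothetical stand-in for the test module's Model class."""

        def __init__(self, num_features, num_classes):
            super().__init__()
            self.fc = torch.nn.Linear(num_features, num_classes)

        def forward(self, x):
            return self.fc(x)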
Example #3
    def _test_fit_loader(device):

        seed_all(42)
        save_dir = tmpdir.mkdir("test_save")
        file_name = "test_classification.bin"
        dataset = torch.utils.data.TensorDataset(X, y)
        test_dataset = torch.utils.data.TensorDataset(test_data)
        loader = torch.utils.data.DataLoader(dataset, batch_size=32)
        test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=16)
        model = torch.nn.Linear(num_features, num_classes)

        metric_list = [
            metrics.Accuracy(num_classes=num_classes, multilabel=False),
            metrics.F1Score(num_classes=num_classes, multilabel=False),
        ]

        callbacks = [
            cbs.EarlyStopping(monitor="accuracy", mode="max"),
            cbs.ModelCheckpoint(monitor="accuracy", mode="max", save_dir=save_dir, file_name=file_name),
            cbs.CosineAnnealingWarmRestarts(T_0=2),
        ]

        exp = Experiment(
            num_epochs=10,
            fp16=fp16,
            device=device,
            seed=42,
        )

        exp.compile_experiment(
            model=model,
            metrics=metric_list,
            callbacks=callbacks,
            main_metric="accuracy",
            optimizer=optimizer,
            optimizer_params=optimizer_params,
            criterion=criterion,
        )
        exp.fit_loader(train_dl=loader, valid_dl=loader)
        exp.plot_history(keys=["accuracy"], save_fig=False, plot_fig=False)
        outputs = []
        for op in exp.predict_on_loader(
            test_dl=test_dl, device=device, path_to_model=os.path.join(save_dir, file_name)
        ):
            outputs.extend(op)

        assert len(outputs) == test_samples
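
predict_on_loader yields predictions batch by batch, and the loop flattens them into one entry per test sample, which is what the final length assertion checks. If each entry is a logits vector (an assumption about this particular model's output), class labels can be recovered afterwards like so:

    import numpy as np

    # Hypothetical stand-in for the `outputs` list collected above:
    # one logits vector of shape (num_classes,) per test sample.
    outputs = [np.random.randn(4) for _ in range(100)]

    preds = np.argmax(np.stack(outputs), axis=1)  # predicted class index per sample
    assert preds.shape == (100,)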
Example #4
    def _test_dict_inputs_error(device):

        seed_all(42)
        exp_config = ModelConfig(
            nn_module={
                "model_A": Model,
                "model_B": Model
            },
            module_params={
                "model_A": {
                    "num_features": num_features,
                    "num_classes": num_classes
                },
                "model_B": {
                    "num_features": num_features,
                    "num_classes": num_classes
                },
            },
            optimizer=optimizer,
            optimizer_params={"lr": optimizer_lr},
            criterion=criterion,
        )

        exp = Experiment(
            num_epochs=10,
            fp16=fp16,
            device=device,
            seed=42,
        )
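        # nn_module is a dict of two models, while optimizer and optimizer_params
        # are single values, so compile_experiment cannot tell which model they
        # belong to and is expected to raise ValueError.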
        with pytest.raises(ValueError):
            exp.compile_experiment(
                model_config=exp_config,
                metrics=None,
                callbacks=None,
                main_metric="val_loss",
            )
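
A consistent configuration keys every argument per model, exactly as Example #2 does; a sketch of the fix (hypothetical values):

    # Key the optimizer settings per model so they match the keys of nn_module.
    optimizer = {"model_A": "Adam", "model_B": "Adam"}
    optimizer_params = {"model_A": {"lr": 3e-4}, "model_B": {"lr": 3e-4}}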
Example #5
 def initialise(self):
     """Method initialise some stuff."""
     seed_all(self.seed)
     self._model_to_device()
     self._reset_model_logs()
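
seed_all is the library's reproducibility helper. A typical implementation of such a helper (a sketch of the usual pattern, not necessarily torchflare's exact code) seeds every RNG in play:

    import os
    import random

    import numpy as np
    import torch

    def seed_all(seed: int) -> None:
        # Seed Python, NumPy, and PyTorch (CPU and all GPUs) for reproducibility.
        random.seed(seed)
        os.environ["PYTHONHASHSEED"] = str(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)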