Example #1
    def setUp(self) -> None:
        if check_for_sklearn_backend():
            from delira.training import SklearnExperiment
            from sklearn.tree import DecisionTreeClassifier
            from sklearn.neural_network import MLPClassifier

            config = DeliraConfig()
            config.fixed_params = {
                "model": {},
                "training": {
                    "losses": {
                        "L1":
                            mean_absolute_error},
                    "optimizer_cls": None,
                    "optimizer_params": {},
                    "num_epochs": 2,
                    "metrics": {"mae": mean_absolute_error},
                    "lr_sched_cls": None,
                    "lr_sched_params": {}}
            }

            # run tests for estimator with and without partial_fit
            model_cls = [
                DecisionTreeClassifier,
                MLPClassifier
            ]

            experiment_cls = SklearnExperiment

        else:
            config = None
            model_cls = []
            experiment_cls = None

        len_train = 50
        len_test = 50

        self._test_cases = [
            {
                "config": config,
                "network_cls": _cls,
                "len_train": len_train,
                "len_test": len_test,
                "key_mapping": {"X": "X"},
                "metric_keys": {"L1": ("pred", "y"),
                                "mae": ("pred", "y")}
            } for _cls in model_cls
        ]
        self._experiment_cls = experiment_cls

        super().setUp()
Example #2
    def setUp(self) -> None:
        if check_for_tf_eager_backend():
            import tensorflow as tf
            tf.enable_eager_execution()
            from delira.training import TfEagerExperiment

            config = DeliraConfig()
            config.fixed_params = {
                "model": {},
                "training": {
                    "losses": {
                        "L1": tf.losses.absolute_difference
                    },
                    "optimizer_cls": tf.train.AdamOptimizer,
                    "optimizer_params": {
                        "learning_rate": 1e-3
                    },
                    "num_epochs": 2,
                    "metrics": {
                        "mae": mean_absolute_error
                    },
                    "lr_sched_cls": None,
                    "lr_sched_params": {}
                }
            }
            model_cls = DummyNetworkTfEager
            experiment_cls = TfEagerExperiment

        else:
            config = None
            model_cls = None
            experiment_cls = None

        len_train = 100
        len_test = 50

        self._test_cases = [{
            "config": config,
            "network_cls": model_cls,
            "len_train": len_train,
            "len_test": len_test,
            "key_mapping": {
                "x": "data"
            },
        }]
        self._experiment_cls = experiment_cls

        super().setUp()
Example #3
    def _resolve_params(self, config: typing.Union[DeliraConfig, None]):
        """
        Merges the given config with ``self.config``.
        If the same argument is present in both configs,
        the value from the given config takes precedence.

        Parameters
        ----------
        config : :class:`DeliraConfig` or None
            the config to merge with ``self.config``

        Returns
        -------
        :class:`DeliraConfig`
            the merged config instance

        """
        if config is None:
            config = DeliraConfig()

        if hasattr(self, "config") and isinstance(self.config, DeliraConfig):
            _config = copy.deepcopy(config)
            config = self.config
            config.update(_config, overwrite=True)

        return config
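
A minimal usage sketch of the merge precedence described above (the names ``base`` and ``given`` are illustrative; it assumes ``DeliraConfig`` is imported as in the surrounding examples and that ``DeliraConfig.update``, the call used in the method body, merges the stored parameters):

import copy

base = DeliraConfig()                    # plays the role of ``self.config``
base.fixed_params = {"training": {"num_epochs": 2, "optimizer_cls": None}}

given = DeliraConfig()                   # the config passed to ``_resolve_params``
given.fixed_params = {"training": {"num_epochs": 5}}

# same call as in the method body: entries from ``given`` overwrite ``base``
base.update(copy.deepcopy(given), overwrite=True)
# after the merge, "num_epochs" takes the value from ``given`` (5)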
Example #4
    def setUp(self) -> None:
        if check_for_chainer_backend():
            from delira.training import ChainerExperiment
            import chainer

            config = DeliraConfig()
            config.fixed_params = {
                "model": {},
                "training": {
                    "losses": {
                        "L1": chainer.functions.mean_absolute_error
                    },
                    "optimizer_cls": chainer.optimizers.Adam,
                    "optimizer_params": {},
                    "num_epochs": 2,
                    "metrics": {
                        "mae": mean_absolute_error
                    },
                    "lr_sched_cls": None,
                    "lr_sched_params": {}
                }
            }
            model_cls = DummyNetworkChainer
            experiment_cls = ChainerExperiment

        else:
            config = None
            model_cls = None
            experiment_cls = None

        len_train = 50
        len_test = 50

        self._test_cases = [{
            "config": config,
            "network_cls": model_cls,
            "len_train": len_train,
            "len_test": len_test,
            "key_mapping": {
                "x": "data"
            }
        }]
        self._experiment_cls = experiment_cls

        super().setUp()
Example #5
    def setUp(self) -> None:
        if check_for_torchscript_backend():
            import torch
            from delira.training import TorchScriptExperiment

            config = DeliraConfig()
            config.fixed_params = {
                "model": {},
                "training": {
                    "losses": {
                        "L1": torch.nn.BCEWithLogitsLoss()
                    },
                    "optimizer_cls": torch.optim.Adam,
                    "optimizer_params": {},
                    "num_epochs": 2,
                    "metrics": {
                        "mae": mean_absolute_error
                    },
                    "lr_sched_cls": None,
                    "lr_sched_params": {}
                }
            }
            model_cls = DummyNetworkTorchScript
            experiment_cls = TorchScriptExperiment

        else:
            config = None
            model_cls = None
            experiment_cls = None

        len_train = 100
        len_test = 50

        self._test_cases = [{
            "config": config,
            "network_cls": model_cls,
            "len_train": len_train,
            "len_test": len_test,
            "key_mapping": {
                "x": "data"
            },
        }]
        self._experiment_cls = experiment_cls

        super().setUp()
Example #6
    def __init__(self):
        dummy_config = DeliraConfig()
        dummy_config.fixed_params = {
            "model": {},
            "training": {
                "losses": {},
                "optimizer_cls": None,
                "optimizer_params": {},
                "num_epochs": 2,
                "lr_sched_cls": None,
                "lr_sched_params": {}
            }
        }
        super().__init__(dummy_config,
                         DummyNetwork,
                         key_mapping={},
                         name="TestExperiment",
                         trainer_cls=DummyTrainer,
                         predictor_cls=DummyPredictor)
Example #7
set_debug_mode(False)

data_path = ""
config_path = ""
save_path = "/tmp/ContourUnet"

bone_label = None

data_path = os.path.expanduser(data_path)
save_path = os.path.expanduser(save_path)

num_epochs = 100

with open(config_path, "r") as f:
    config = DeliraConfig(**json.load(f))

base_transforms = [
    # HistogramEqualization(),
    RangeTransform((-1, 1)),
    # AddGridTransform()
]
train_specific_transforms = []
test_specific_transforms = []

train_transforms = Compose(base_transforms + train_specific_transforms)
test_transforms = Compose(base_transforms + test_specific_transforms)

if get_current_debug_mode():
    train_dir = "Test"
    test_dir = "Test"

config = DeliraConfig(
    fixed_model={
        "in_channels": 1,
        "num_classes": 3,
        "norm_layer": "Instance",
        "per_class": False
    },
    fixed_training={
        "optimizer_cls": RAdam,
        "optimizer_params": {
            "lr": 1e-3
        },
        "lr_sched_cls": ReduceLROnPlateauCallbackPyTorch,
        "lr_sched_params": {
            "mode": "max",
            "patience": 5
        },
        "losses": {
            "ce": torch.nn.CrossEntropyLoss(),
            "soft_dice": SoftDiceLossPyTorch(non_lin=torch.nn.Softmax(dim=1))
        },
        "metrics": {
            "dice": dice_score_including_background
        },
        "num_epochs": 300,  # number of epochs to train
    },
    val_split=0.2,
    seed=0,
    batchsize=1,
    checkpoint_freq=1,
    gpu_ids=[0],
    val_score_key="dice",
    val_score_mode="highest",
    verbose=True,
    metric_keys={"dice": ("pred", "label")})