Example 1
import unittest

import numpy as np

import delira


class TfExperimentTest(unittest.TestCase):
    @unittest.skipIf("TF" not in delira.get_backends(),
                     reason="No TF Backend installed")
    def test_experiment(self):

        from delira.training import TfExperiment, Parameters
        from delira.models.classification import ClassificationNetworkBaseTf
        from delira.data_loading import AbstractDataset, BaseDataManager
        import tensorflow as tf

        test_cases = [(Parameters(
            fixed_params={
                "model": {
                    'in_channels': 32,
                    'n_outputs': 1
                },
                "training": {
                    "criterions": {
                        "CE": tf.losses.softmax_cross_entropy
                    },
                    "optimizer_cls": tf.train.AdamOptimizer,
                    "optimizer_params": {
                        "learning_rate": 1e-3
                    },
                    "num_epochs": 2,
                    "metrics": {},
                    "lr_sched_cls": None,
                    "lr_sched_params": {}
                }
            }), 500, 50)]

        class DummyNetwork(ClassificationNetworkBaseTf):
            def __init__(self):
                super().__init__(32, 1)
                self.model = self._build_model(1)

                images = tf.placeholder(shape=[None, 32], dtype=tf.float32)
                labels = tf.placeholder(shape=[None, 1], dtype=tf.float32)

                preds_train = self.model(images, training=True)
                preds_eval = self.model(images, training=False)

                self.inputs = [images, labels]
                self.outputs_train = [preds_train]
                self.outputs_eval = [preds_eval]

            @staticmethod
            def _build_model(n_outputs):
                return tf.keras.models.Sequential(layers=[
                    tf.keras.layers.Dense(64,
                                          input_shape=(32, ),
                                          bias_initializer='glorot_uniform'),
                    tf.keras.layers.ReLU(),
                    tf.keras.layers.Dense(n_outputs,
                                          bias_initializer='glorot_uniform')
                ])

        class DummyDataset(AbstractDataset):
            def __init__(self, length):
                super().__init__(None, None, None, None)
                self.length = length

            def __getitem__(self, index):
                return {
                    "data": np.random.rand(32),
                    # note: randint(0, 1) can only yield 0, so this is a
                    # constant dummy label
                    "label": np.random.randint(0, 1, 1)
                }

            def __len__(self):
                return self.length

            def get_sample_from_index(self, index):
                return self.__getitem__(index)

        for case in test_cases:
            with self.subTest(case=case):

                params, dataset_length_train, dataset_length_test = case

                exp = TfExperiment(params, DummyNetwork)
                dset_train = DummyDataset(dataset_length_train)
                dset_test = DummyDataset(dataset_length_test)

                dmgr_train = BaseDataManager(dset_train, 16, 4, None)
                dmgr_test = BaseDataManager(dset_test, 16, 1, None)

                net = exp.run(dmgr_train, dmgr_test)
                exp.test(
                    params=params,
                    network=net,
                    datamgr_test=dmgr_test,
                )

                exp.kfold(2, dmgr_train, num_splits=2)
                exp.stratified_kfold(2, dmgr_train, num_splits=2)
                exp.stratified_kfold_predict(2, dmgr_train, num_splits=2)
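
The DummyNetwork and DummyDataset stubs keep this test self-contained: they give the experiment a real (if trivial) TF graph and random data, so `run`, `test` and the three k-fold variants can be smoke-tested end to end without a real dataset.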
Example 2
from delira import get_backends

if "TORCH" in get_backends():
    import torch
    import torch.nn.functional as F

    class BCEFocalLossPyTorch(torch.nn.Module):
        """
        Focal loss for the binary case without(!) logits, i.e. it expects
        probabilities instead of raw scores

        """
        def __init__(self, alpha=None, gamma=2, reduction='elementwise_mean'):
            """
            Implements Focal Loss for binary class case

            Parameters
            ----------
            alpha : float
                class-weighting factor; has to be in the range [0, 1]
            gamma : float
                focusing parameter
            reduction : str
                Specifies the reduction to apply to the output: 'none' |
                'elementwise_mean' | 'sum'.
                    'none': no reduction will be applied
                    'elementwise_mean': the sum of the output will be
                        divided by the number of elements in the output
                    'sum': the output will be summed
            (further information about the parameters above can be found
            in the pytorch documentation)

            """
            super().__init__()
            self.alpha = alpha
            self.gamma = gamma
            self.reduction = reduction

        def forward(self, p, t):
            # NOTE: the original snippet was truncated here -- the body
            # below is a minimal reconstruction sketch of binary focal
            # loss on probabilities, not necessarily delira's exact code

            # per-element binary cross-entropy on probabilities
            bce_loss = F.binary_cross_entropy(p, t, reduction='none')

            # probability assigned to the true class; well-classified
            # examples (p_t close to 1) are down-weighted by the focal term
            p_t = p * t + (1 - p) * (1 - t)
            loss = (1 - p_t) ** self.gamma * bce_loss

            # optional class balancing via alpha
            if self.alpha is not None:
                alpha_t = self.alpha * t + (1 - self.alpha) * (1 - t)
                loss = alpha_t * loss

            if self.reduction == 'elementwise_mean':
                return loss.mean()
            if self.reduction == 'sum':
                return loss.sum()
            return loss
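
A quick usage sketch (illustrative values only; it assumes the reconstructed `forward` above, and since the loss expects probabilities the raw model outputs are squashed through a sigmoid first):

criterion = BCEFocalLossPyTorch(alpha=0.25, gamma=2)
probs = torch.sigmoid(torch.randn(8, 1))       # predicted probabilities in (0, 1)
targets = torch.randint(0, 2, (8, 1)).float()  # binary ground truth
loss = criterion(probs, targets)               # scalar under 'elementwise_mean'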
Example 3
from delira import get_backends
from delira.training.callbacks.abstract_callback import AbstractCallback

if 'TORCH' in get_backends():
    from torch.optim.lr_scheduler import ReduceLROnPlateau, \
        CosineAnnealingLR, ExponentialLR, LambdaLR, MultiStepLR, StepLR

    class DefaultPyTorchSchedulerCallback(AbstractCallback):
        """
        Implements a Callback whose `at_epoch_end` function is suitable for
        most schedulers

        """
        def __init__(self, *args, **kwargs):
            """

            Parameters
            ----------
            *args :
                Arbitrary Positional Arguments
            **kwargs :
                Arbitrary Keyword Arguments

            """
            super().__init__()

            self.scheduler = None

        def at_epoch_end(self, trainer, **kwargs):
            """
            Executes a single scheduling step

            Parameters
            ----------
            trainer : :class:`AbstractNetworkTrainer`
                the trainer whose scheduler state is advanced
            **kwargs :
                additional keyword arguments

            """
            # NOTE: truncated in the original snippet -- a minimal
            # completion: concrete subclasses create ``self.scheduler``
            # before training starts
            if self.scheduler is not None:
                self.scheduler.step()
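
The base class deliberately leaves `self.scheduler` as None; a concrete subclass wires in the actual scheduler object. A minimal, hypothetical subclass (the name and constructor are illustrative, not part of delira's API):

class StepLRCallbackSketch(DefaultPyTorchSchedulerCallback):
    """
    Hypothetical example: multiplies the learning rate by ``gamma``
    every ``step_size`` epochs via ``torch.optim.lr_scheduler.StepLR``
    """

    def __init__(self, optimizer, step_size=10, gamma=0.1):
        super().__init__()
        # the generic ``at_epoch_end`` above then steps this scheduler
        self.scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma)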
Example 4
import gc
import time
import unittest

import numpy as np
from psutil import virtual_memory

from delira import get_backends


class TorchModelTest(unittest.TestCase):
    @unittest.skipIf((virtual_memory().total / 1024.**3) < 20,
                     reason="Less than 20GB of memory")
    @unittest.skipIf("TORCH" not in get_backends(),
                     reason="torch backend not installed")
    def test_pytorch_model_default(self):
        from delira.models import UNet2dPyTorch, \
            UNet3dPyTorch, ClassificationNetworkBasePyTorch, \
            VGG3DClassificationNetworkPyTorch, \
            GenerativeAdversarialNetworkBasePyTorch
        from delira.training.train_utils import \
            create_optims_default_pytorch, create_optims_gan_default_pytorch
        from delira.utils.context_managers import DefaultOptimWrapperTorch
        import torch

        test_cases = [
            # UNet 2D
            (
                UNet2dPyTorch(5, in_channels=1),  # model
                (1, 32, 32),  # input shape
                (32, 32),  # output shape
                {
                    "loss_fn": torch.nn.CrossEntropyLoss()
                },  # loss function
                create_optims_default_pytorch,  # optim_fn
                4,  # output range (num_classes - 1)
                True  # half precision
            ),
            # UNet 3D
            (
                UNet3dPyTorch(5, in_channels=1),  # model
                (1, 32, 32, 32),  # input shape
                (32, 32, 32),  # output shape
                {
                    "loss_fn": torch.nn.CrossEntropyLoss()
                },  # loss function
                create_optims_default_pytorch,  # optim_fn
                4,  # output range (num_classes - 1)
                True  # half precision
            ),
            # Base Classifier (Resnet 18)
            (
                ClassificationNetworkBasePyTorch(1, 10),
                # model
                (1, 224, 224),  # input shape
                9,  # output shape (num_classes - 1)
                {
                    "loss_fn": torch.nn.CrossEntropyLoss()
                },  # loss function
                create_optims_default_pytorch,  # optim_fn
                None,  # no max_range needed
                True  # half precision
            ),
            # 3D VGG
            (
                VGG3DClassificationNetworkPyTorch(1, 10),
                # model
                (1, 32, 224, 224),  # input shape
                9,  # output shape (num_classes - 1)
                {
                    "loss_fn": torch.nn.CrossEntropyLoss()
                },  # loss function
                create_optims_default_pytorch,  # optim fn
                None,  # no max_range needed
                True  # half precision
            ),
            # DCGAN
            (
                GenerativeAdversarialNetworkBasePyTorch(1, 100),
                # model
                (1, 64, 64),  # input shape
                (1, 1),  # arbitrary shape (not needed)
                {
                    "loss_fn": torch.nn.MSELoss()
                },  # loss
                create_optims_gan_default_pytorch,
                # optimizer function
                1,  # standard max range
                True  # half precision
            )
        ]

        for case in test_cases:
            with self.subTest(case=case):

                model, input_shape, output_shape, loss_fn, optim_fn, \
                    max_range, half_precision = case

                try:
                    from apex import amp
                    amp_handle = amp.init(half_precision)
                    wrapper_fn = amp_handle.wrap_optimizer
                except ImportError:
                    wrapper_fn = DefaultOptimWrapperTorch

                start_time = time.time()

                # test backward if optimizer fn is not None
                if optim_fn is not None:
                    optim = {
                        k: wrapper_fn(v, num_loss=len(loss_fn))
                        for k, v in optim_fn(model, torch.optim.Adam).items()
                    }

                else:
                    optim = {}

                closure = model.closure
                device = torch.device("cpu")
                model = model.to(device)
                prepare_batch = model.prepare_batch

                # classification label: target_shape specifies max label
                if isinstance(output_shape, int):
                    label = np.asarray(
                        [np.random.randint(output_shape) for i in range(10)])
                else:
                    label = np.random.rand(10, *output_shape) * max_range

                data_dict = {
                    "data": np.random.rand(10, *input_shape),
                    "label": label
                }

                try:
                    data_dict = prepare_batch(data_dict, device, device)
                    closure(model, data_dict, optim, loss_fn, {})
                except Exception as e:
                    self.fail("Test for %s not passed: Error: %s"
                              % (model.__class__.__name__, e))

                end_time = time.time()

                print("Time needed for %s: %.3f" %
                      (model.__class__.__name__, end_time - start_time))

                del device
                del optim
                del closure
                del prepare_batch
                del model
                try:
                    del amp_handle
                except NameError:
                    pass
                gc.collect()
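
Two details of this loop are worth noting: the apex import guard falls back to `DefaultOptimWrapperTorch` when mixed precision is unavailable, and every case's model, optimizers and closure are explicitly deleted and garbage-collected inside the subtest, so one memory-hungry model does not distort the timing or memory footprint of the next case.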
Example 5
import gc
import time
import unittest

import numpy as np
from psutil import virtual_memory

from delira import get_backends


class TfModelTest(unittest.TestCase):
    @unittest.skipIf((virtual_memory().total / 1024.**3) < 20,
                     reason="Less than 20GB of memory")
    @unittest.skipIf("TF" not in get_backends(),
                     reason="No TF Backend Installed")
    def test_tf_model_default(self):
        from delira.models import ClassificationNetworkBaseTf
        from delira.training.train_utils import create_optims_default_tf
        import tensorflow as tf
        test_cases = [(
            ClassificationNetworkBaseTf(1, 10),
            # model
            (1, 224, 224),  # input shape
            9,  # output shape (num_classes - 1)
            {
                "loss_fn": tf.losses.softmax_cross_entropy
            },  # loss function
            create_optims_default_tf,  # optim_fn
            4)]

        for case in test_cases:
            with self.subTest(case=case):
                model, input_shape, output_shape, loss_fns, optim_fn, \
                    max_range = case
                start_time = time.time()

                # test backward if optimizer fn is not None
                if optim_fn is not None:
                    optim = optim_fn(tf.train.AdamOptimizer)

                else:
                    optim = {}

                model._add_losses(loss_fns)
                model._add_optims(optim)
                model._sess.run(tf.initializers.global_variables())

                closure = model.closure

                # classification label: target_shape specifies max label
                if isinstance(output_shape, int):
                    label = np.asarray([
                        np.random.randint(output_shape) for i in range(10)
                    ])[np.newaxis, :]
                else:
                    label = np.random.rand(10, *output_shape) * max_range

                data_dict = {
                    "data": np.random.rand(10, *input_shape),
                    "label": label
                }

                try:
                    closure(model, data_dict)
                except Exception as e:
                    self.fail("Test for %s not passed: Error: %s"
                              % (model.__class__.__name__, e))

                end_time = time.time()

                print("Time needed for %s: %.3f" %
                      (model.__class__.__name__, end_time - start_time))

                del optim
                del closure
                del model
                gc.collect()
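
In contrast to the PyTorch test above, the TF1-style model binds its losses and optimizers into the graph up front via `_add_losses` and `_add_optims`, initializes all variables on the model's own session, and only then runs the closure on a plain numpy `data_dict`.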