Code Example #1
File: test_experiments.py  Project: sskram/delira
    def test_experiment_test_torch(self):
        from delira.training import PyTorchExperiment
        from delira.data_loading import BaseDataManager

        for case in self._test_cases_torch:
            with self.subTest(case=case):
                (params, dataset_length_train, dataset_length_test,
                 val_score_key, val_score_mode, network_cls) = case

                exp = PyTorchExperiment(params, network_cls,
                                        key_mapping={"x": "data"},
                                        val_score_key=val_score_key,
                                        val_score_mode=val_score_mode)

                model = network_cls()

                dset_test = DummyDataset(dataset_length_test)
                dmgr_test = BaseDataManager(dset_test, 16, 1, None)

                prepare_batch = partial(
                    model.prepare_batch,
                    output_device="cpu",
                    input_device="cpu")

                exp.test(model, dmgr_test,
                         params.nested_get("val_metrics"),
                         prepare_batch=prepare_batch)
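
Example #1 (and Example #3 below) reference DummyDataset and partial without defining them in this excerpt; a minimal sketch of the assumed module-level definitions, modeled on the DummyDataset classes in Examples #2 and #9 (the exact sample shapes in the original test file may differ):

from functools import partial

import numpy as np

from delira.data_loading import AbstractDataset


class DummyDataset(AbstractDataset):
    # Synthetic dataset returning random data/label pairs of a fixed length.
    def __init__(self, length):
        super().__init__(None, None, None, None)
        self.length = length

    def __getitem__(self, index):
        return {"data": np.random.rand(32),
                "label": np.random.randint(0, 1, 1)}

    def __len__(self):
        return self.length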
Code Example #2
import numpy as np
import torch

from delira.data_loading import AbstractDataset, BaseDataManager
from delira.models.classification import ClassificationNetworkBasePyTorch
from delira.training import PyTorchExperiment


def test_experiment(params, dataset_length_train, dataset_length_test):
    class DummyNetwork(ClassificationNetworkBasePyTorch):
        def __init__(self):
            super().__init__(32, 1)

        def forward(self, x):
            return self.module(x)

        @staticmethod
        def _build_model(in_channels, n_outputs):
            return torch.nn.Sequential(torch.nn.Linear(in_channels, 64),
                                       torch.nn.ReLU(),
                                       torch.nn.Linear(64, n_outputs))

    class DummyDataset(AbstractDataset):
        def __init__(self, length):
            super().__init__(None, None, None, None)
            self.length = length

        def __getitem__(self, index):
            return {
                "data": np.random.rand(1, 32),
                "label": np.random.rand(1, 1)
            }

        def __len__(self):
            return self.length

    exp = PyTorchExperiment(params, DummyNetwork)
    dset_train = DummyDataset(dataset_length_train)
    dset_test = DummyDataset(dataset_length_test)

    dmgr_train = BaseDataManager(dset_train, 16, 4, None)
    dmgr_test = BaseDataManager(dset_test, 16, 1, None)

    net = exp.run(dmgr_train, dmgr_test)
    exp.test(
        params=params,
        network=net,
        datamgr_test=dmgr_test,
    )
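
The params argument passed into test_experiment is a delira Parameters object; a minimal sketch of a matching configuration, taken from the test case in Example #9 below (exact keys depend on the delira version):

import torch

from delira.training import Parameters
from delira.training.callbacks import ReduceLROnPlateauCallbackPyTorch

# Fixed training parameters mirroring the test case in Example #9.
params = Parameters(fixed_params={
    "model": {},
    "training": {
        "criterions": {"CE": torch.nn.CrossEntropyLoss()},
        "optimizer_cls": torch.optim.Adam,
        "optimizer_params": {"lr": 1e-3},
        "num_epochs": 2,
        "metrics": {},
        "lr_sched_cls": ReduceLROnPlateauCallbackPyTorch,
        "lr_sched_params": {}
    }
})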
Code Example #3
File: test_experiments.py  Project: sskram/delira
    def test_experiment_run_torch(self):

        from delira.training import PyTorchExperiment
        from delira.data_loading import BaseDataManager

        for case in self._test_cases_torch:
            with self.subTest(case=case):

                (params, dataset_length_train, dataset_length_test,
                 val_score_key, val_score_mode, network_cls) = case

                exp = PyTorchExperiment(params, network_cls,
                                        key_mapping={"x": "data"},
                                        val_score_key=val_score_key,
                                        val_score_mode=val_score_mode)

                dset_train = DummyDataset(dataset_length_train)
                dset_test = DummyDataset(dataset_length_test)

                dmgr_train = BaseDataManager(dset_train, 16, 2, None)
                dmgr_test = BaseDataManager(dset_test, 16, 1, None)

                exp.run(dmgr_train, dmgr_test)
Code Example #4
File: test_experiments.py  Project: sskram/delira
    def test_experiment_kfold_torch(self):
        from delira.training import PyTorchExperiment
        from delira.data_loading import BaseDataManager
        from copy import deepcopy

        # all test cases
        for case in self._test_cases_torch:
            with self.subTest(case=case):
                (params, dataset_length_train,
                 dataset_length_test, val_score_key,
                 val_score_mode, network_cls) = case

                # all split types, including an invalid one that must raise
                for split_type in ["random", "stratified", "error"]:
                    with self.subTest(split_type=split_type):
                        if split_type == "error":
                            # must raise ValueError
                            with self.assertRaises(ValueError):
                                exp = PyTorchExperiment(
                                    params, network_cls,
                                    key_mapping={"x": "data"},
                                    val_score_key=val_score_key,
                                    val_score_mode=val_score_mode)

                                dset = DummyDataset(
                                    dataset_length_test + dataset_length_train)

                                dmgr = BaseDataManager(dset, 16, 1, None)
                                exp.kfold(
                                    dmgr,
                                    params.nested_get("val_metrics"),
                                    shuffle=True,
                                    split_type=split_type,
                                    num_splits=2)

                            continue

                        # check all types of validation data
                        for val_split in [0.2, None]:
                            with self.subTest(val_split=val_split):

                                # disable lr scheduling if no validation data
                                # is present
                                _params = deepcopy(params)
                                if val_split is None:
                                    _params["fixed"]["training"
                                                     ]["lr_sched_cls"] = None
                                exp = PyTorchExperiment(
                                    _params, network_cls,
                                    key_mapping={"x": "data"},
                                    val_score_key=val_score_key,
                                    val_score_mode=val_score_mode)

                                dset = DummyDataset(
                                    dataset_length_test + dataset_length_train)

                                dmgr = BaseDataManager(dset, 16, 1, None)
                                exp.kfold(
                                    dmgr,
                                    params.nested_get("val_metrics"),
                                    shuffle=True,
                                    split_type=split_type,
                                    val_split=val_split,
                                    num_splits=2)
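
Distilled from the nested loops above, a single k-fold run boils down to the following (a minimal sketch reusing exp, params and DummyDataset from the surrounding examples; the dataset length is illustrative):

# Sketch of one standalone k-fold run, assuming the objects defined above.
dset = DummyDataset(dataset_length_train + dataset_length_test)
dmgr = BaseDataManager(dset, 16, 1, None)
exp.kfold(dmgr,
          params.nested_get("val_metrics"),
          shuffle=True,
          split_type="random",  # or "stratified"; anything else raises ValueError
          val_split=0.2,        # None disables the validation split
          num_splits=2)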
Code Example #5
mgr_val = DataManager(dset_val,
                      config.batchsize,
                      4,
                      test_transforms,
                      sampler_cls=SequentialSampler)
mgr_test = DataManager(dset_test,
                       config.batchsize,
                       4,
                       test_transforms,
                       sampler_cls=SequentialSampler)

experiment = PyTorchExperiment(
    config,
    UNetTorch,
    name="BaselineUnetFocalLoss",
    save_path=save_path,
    checkpoint_freq=config.checkpoint_freq,
    gpu_ids=config.gpu_ids,
    val_score_key=config.val_score_key,
    metric_keys=config.metric_keys,
)

experiment.save()
net = experiment.run(mgr_train,
                     mgr_val,
                     val_score_mode=config.val_score_mode,
                     verbose=config.verbose)
net.eval()
experiment.test(
    net,
    mgr_test,
    verbose=config.verbose,
)
Code Example #6
def run_experiment(cp: str, test=True) -> str:
    """
    Run a classification experiment on patches.
    Imports are done inside the function because of the logging setup.

    Parameters
    ----------
    cp : str
        path to config file
    test : bool
        test best model on test set

    Returns
    -------
    str
        path to experiment folder
    """
    # setup config
    ch = ConfigHandlerPyTorchDelira(cp)
    ch = feature_map_params(ch)

    if 'mixed_precision' not in ch or ch['mixed_precision'] is None:
        ch['mixed_precision'] = True
    if 'debug_delira' in ch and ch['debug_delira'] is not None:
        delira.set_debug_mode(ch['debug_delira'])
        print("Debug mode active: settings n_process_augmentation to 1!")
        ch['augment.n_process'] = 1

    dset_keys = ['train', 'val', 'test']

    losses = {'class_ce': torch.nn.CrossEntropyLoss()}
    train_metrics = {}
    val_metrics = {'CE': metric_wrapper_pytorch(torch.nn.CrossEntropyLoss())}
    test_metrics = {'CE': metric_wrapper_pytorch(torch.nn.CrossEntropyLoss())}

    #########################
    #   Setup Parameters    #
    #########################
    params_dict = ch.get_params(losses=losses,
                                train_metrics=train_metrics,
                                val_metrics=val_metrics,
                                add_self=ch['add_config_to_params'])
    params = Parameters(**params_dict)

    #################
    #   Setup IO    #
    #################
    # setup io
    load_sample = load_pickle
    load_fn = LoadPatches(load_fn=load_sample,
                          patch_size=ch['patch_size'],
                          **ch['data.load_patch'])

    datasets = {}
    for key in dset_keys:
        p = os.path.join(ch["data.path"], str(key))

        datasets[key] = BaseExtendCacheDataset(p,
                                               load_fn=load_fn,
                                               **ch['data.kwargs'])

    #############################
    #   Setup Transformations   #
    #############################
    base_transforms = []
    base_transforms.append(PopKeys("mapping"))

    train_transforms = []
    if ch['augment.mode']:
        logger.info("Training augmentation enabled.")
        train_transforms.append(
            SpatialTransform(patch_size=ch['patch_size'],
                             **ch['augment.kwargs']))
        train_transforms.append(MirrorTransform(axes=(0, 1)))
    process = ch['augment.n_process'] if 'augment.n_process' in ch else 1

    #########################
    #   Setup Datamanagers  #
    #########################
    datamanagers = {}
    for key in dset_keys:
        if key == 'train':
            trafos = base_transforms + train_transforms
            sampler = WeightedPrevalenceRandomSampler
        else:
            trafos = base_transforms
            sampler = SequentialSampler

        datamanagers[key] = BaseDataManager(
            data=datasets[key],
            batch_size=params.nested_get('batch_size'),
            n_process_augmentation=process,
            transforms=Compose(trafos),
            sampler_cls=sampler,
        )

    #############################
    #   Initialize Experiment   #
    #############################
    experiment = PyTorchExperiment(
            params=params,
            model_cls=ClassNetwork,
            name=ch['exp.name'],
            save_path=ch['exp.dir'],
            optim_builder=create_optims_default_pytorch,
            trainer_cls=PyTorchNetworkTrainer,
            mixed_precision=ch['mixed_precision'],
            mixed_precision_kwargs={'verbose': False},
            key_mapping={"input_batch": "data"},
            **ch['exp.kwargs'],
        )

    # save configurations
    ch.dump(os.path.join(experiment.save_path, 'config.json'))

    #################
    #   Training    #
    #################
    model = experiment.run(datamanagers['train'],
                           datamanagers['val'],
                           save_path_exp=experiment.save_path,
                           ch=ch,
                           metric_keys={'val_CE': ['pred', 'label']},
                           val_freq=1,
                           verbose=True)
    ################
    #   Testing    #
    ################
    if test and datamanagers['test'] is not None:
        # metrics and metric_keys are used differently than in original
        # Delira implementation in order to support Evaluator
        # see mscl.training.predictor
        preds = experiment.test(
            network=model,
            test_data=datamanagers['test'],
            metrics=test_metrics,
            metric_keys={'CE': ['pred', 'label']},
            verbose=True,
        )

        softmax_fn = metric_wrapper_pytorch(
            partial(torch.nn.functional.softmax, dim=1))
        preds = softmax_fn(preds[0]['pred'])
        labels = [d['label'] for d in datasets['test']]
        fpr, tpr, thresholds = roc_curve(labels, preds[:, 1])
        roc_auc = auc(fpr, tpr)

        plt.plot(fpr, tpr, label='ROC (AUC = %0.2f)' % roc_auc)
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.savefig(os.path.join(experiment.save_path, 'test_roc.pdf'))
        plt.close()

        preds = experiment.test(
            network=model,
            test_data=datamanagers['val'],
            metrics=test_metrics,
            metric_keys={'CE': ['pred', 'label']},
            verbose=True,
        )

        preds = softmax_fn(preds[0]['pred'])
        labels = [d['label'] for d in datasets['val']]
        fpr, tpr, thresholds = roc_curve(labels, preds[:, 1])
        roc_auc = auc(fpr, tpr)

        plt.plot(fpr, tpr, label='ROC (AUC = %0.2f)' % roc_auc)
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.savefig(os.path.join(experiment.save_path, 'best_val_roc.pdf'))
        plt.close()

    return experiment.save_path
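
A hypothetical invocation of run_experiment (the config path below is an illustrative assumption, not taken from the original code):

# Hypothetical usage; the config file path is a placeholder.
exp_dir = run_experiment("configs/patch_classification.yaml", test=True)
print("Experiment folder:", exp_dir)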
Code Example #7
manager_val = BaseDataManager(dataset_val, params.nested_get("batch_size"),
                              transforms=Compose(base_transforms),
                              sampler_cls=SequentialSampler,
                              n_process_augmentation=n_process_augmentation)

manager_test = BaseDataManager(dataset_test, 1,
                               transforms=Compose(base_transforms),
                               sampler_cls=SequentialSampler,
                               n_process_augmentation=n_process_augmentation)

logger.info("Init Experiment")
experiment = PyTorchExperiment(params,
                               ClassificationNetworkBasePyTorch,
                               name=exp_name,
                               save_path=save_path,
                               checkpoint_freq=checkpoint_freq,
                               optim_builder=create_optims_default_pytorch,
                               gpu_ids=[0],
                               mixed_precision=False,
                               val_score_key=val_score_key,
                               )
experiment.save()
net = experiment.run(manager_train, manager_val,
                     val_score_mode=val_score_mode,
                     verbose=True,
                     )

experiment.test(net, manager_test,
                verbose=True,
                )
Code Example #8
File: delira_train.py  Project: DecentMakeover/verse_
# The opening of this call was cut off in the source listing; the first
# lines are reconstructed from the parallel manager_val call below.
manager_train = BaseDataManager(dataset_train,
                                params.nested_get("batch_size"),
                                transforms=transforms,
                                sampler_cls=RandomSampler,
                                n_process_augmentation=4)

manager_val = BaseDataManager(dataset_val,
                              params.nested_get("batch_size"),
                              transforms=transforms,
                              sampler_cls=SequentialSampler,
                              n_process_augmentation=4)

import warnings
warnings.simplefilter(
    "ignore", UserWarning)  # ignore UserWarnings raised by dependency code
warnings.simplefilter(
    "ignore", FutureWarning)  # ignore FutureWarnings raised by dependency code

from delira.training import PyTorchExperiment
from delira.training.train_utils import create_optims_default_pytorch
from delira.models.segmentation import UNet3dPyTorch

# logger.info("Init Experiment")
print(create_optims_default_pytorch)
experiment = PyTorchExperiment(params,
                               UNet3dPyTorch,
                               name="Segmentation3dExample",
                               save_path="./tmp/delira_Experiments",
                               optim_builder=create_optims_default_pytorch,
                               gpu_ids=[0],
                               mixed_precision=True)
experiment.save()
print(manager_train, manager_val)
model = experiment.run(manager_train, manager_val)
Code Example #9
    def test_experiment(self):

        from delira.training import PyTorchExperiment, Parameters
        from delira.training.callbacks import ReduceLROnPlateauCallbackPyTorch
        from delira.models.classification import ClassificationNetworkBasePyTorch
        from delira.data_loading import AbstractDataset, BaseDataManager
        import numpy as np
        import torch

        test_cases = [(Parameters(
            fixed_params={
                "model": {},
                "training": {
                    "criterions": {
                        "CE": torch.nn.CrossEntropyLoss()
                    },
                    "optimizer_cls": torch.optim.Adam,
                    "optimizer_params": {
                        "lr": 1e-3
                    },
                    "num_epochs": 2,
                    "metrics": {},
                    "lr_sched_cls": ReduceLROnPlateauCallbackPyTorch,
                    "lr_sched_params": {}
                }
            }), 500, 50)]

        class DummyNetwork(ClassificationNetworkBasePyTorch):
            def __init__(self):
                super().__init__(32, 1)

            def forward(self, x):
                return self.module(x)

            @staticmethod
            def _build_model(in_channels, n_outputs):
                return torch.nn.Sequential(torch.nn.Linear(in_channels, 64),
                                           torch.nn.ReLU(),
                                           torch.nn.Linear(64, n_outputs))

            @staticmethod
            def prepare_batch(batch_dict, input_device, output_device):
                return {
                    "data":
                    torch.from_numpy(batch_dict["data"]).to(
                        input_device, torch.float),
                    "label":
                    torch.from_numpy(batch_dict["label"]).to(
                        output_device, torch.long)
                }

        class DummyDataset(AbstractDataset):
            def __init__(self, length):
                super().__init__(None, None, None, None)
                self.length = length

            def __getitem__(self, index):
                return {
                    "data": np.random.rand(32),
                    "label": np.random.randint(0, 1, 1)
                }

            def __len__(self):
                return self.length

            def get_sample_from_index(self, index):
                return self.__getitem__(index)

        for case in test_cases:
            with self.subTest(case=case):

                params, dataset_length_train, dataset_length_test = case

                exp = PyTorchExperiment(params, DummyNetwork)
                dset_train = DummyDataset(dataset_length_train)
                dset_test = DummyDataset(dataset_length_test)

                dmgr_train = BaseDataManager(dset_train, 16, 4, None)
                dmgr_test = BaseDataManager(dset_test, 16, 1, None)

                net = exp.run(dmgr_train, dmgr_test)
                exp.test(
                    params=params,
                    network=net,
                    datamgr_test=dmgr_test,
                )

                exp.kfold(2, dmgr_train, num_splits=2)
                exp.stratified_kfold(2, dmgr_train, num_splits=2)
                exp.stratified_kfold_predict(2, dmgr_train, num_splits=2)
Code Example #10
def train(model_cls,
          model_kwargs: dict,
          outpath: str,
          data_path,
          exp_name=None,
          batchsize=64,
          num_epochs=1500,
          checkpoint_freq=10,
          additional_losses: dict = None,
          dset_type="mnist",
          key_mapping=None,
          create_optim_fn=None):

    if exp_name is None:
        exp_name = model_cls.__name__

    if additional_losses is None:
        additional_losses = {}

    if create_optim_fn is None:
        create_optim_fn = create_optims

    outpath = os.path.expanduser(outpath)

    losses = {"adversarial": AdversarialLoss()}
    losses.update(additional_losses)
    params = Parameters(
        fixed_params={
            "model": {
                **model_kwargs
            },
            "training": {
                "num_epochs": num_epochs,
                "batchsize": batchsize,
                "losses": losses,
                "val_metrics": {},
                "optimizer_cls": torch.optim.Adam,
                "optimizer_params": {
                    "lr": 0.001,
                    "betas": (0.5, 0.9995)
                },
                "scheduler_cls": None,
                "scheduler_params": {}
            }
        })
    data = setup_data(data_path, params.nested_get("batchsize"), 4,
                      RangeTransform(), RangeTransform(), dset_type)
    exp = PyTorchExperiment(params,
                            model_cls,
                            params.nested_get("num_epochs"),
                            name=exp_name,
                            save_path=outpath,
                            key_mapping=key_mapping,
                            optim_builder=create_optim_fn,
                            checkpoint_freq=checkpoint_freq,
                            gpu_ids=[0])

    model = exp.run(data["train"], data["val"])
    weight_dir = os.path.join(exp.save_path, "checkpoints", "run_00")

    return model, weight_dir
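
A hypothetical call to train (the network class, its kwargs and both paths are illustrative assumptions, not part of the original snippet):

# Hypothetical usage; MyGAN and all paths/kwargs below are placeholders.
model, weight_dir = train(MyGAN,
                          model_kwargs={"latent_dim": 128},
                          outpath="~/experiments/gan",
                          data_path="~/data/mnist",
                          dset_type="mnist",
                          batchsize=64,
                          num_epochs=10)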