Code example #1
    def get_epochs_to_test(self):
        if self.args.test_all_epochs:
            # Get all saved points and test them
            epochs_tested = [
                list(
                    range(self.args.nb_epochs_per_saving, self.args.nb_epochs,
                          self.args.nb_epochs_per_saving)) +
                [self.args.nb_epochs - 1] for _ in range(self.args.nb_folds)
            ]
        elif self.args.test_best_epoch:
            # Get the best point of each fold according to a certain metric (early stopping)
            metric = self.args.test_best_epoch
            h_val = History.load_from_dir(
                self.args.checkpoint_dir,
                "Validation_%s" % (self.args.exp_name or ""),
                self.args.nb_folds - 1, self.args.nb_epochs - 1)
            epochs_tested = h_val.get_best_epochs(metric,
                                                  highest=True).reshape(-1, 1)
        else:
            # Get the last point and test it, for each fold
            epochs_tested = [[self.args.nb_epochs - 1]
                             for _ in range(self.args.nb_folds)]

        return epochs_tested
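
Each branch returns one list of epoch indices per fold, so a test driver can iterate the result directly. A minimal consumption sketch follows; the `tester` object and the `run_test` helper are assumptions for illustration, not part of the source.

# Hypothetical driver: evaluate every (fold, epoch) pair returned above.
epochs_tested = tester.get_epochs_to_test()
for fold, epochs in enumerate(epochs_tested):
    for epoch in epochs:
        # e.g. load the checkpoint saved at this fold/epoch and test it
        run_test(fold=fold, epoch=int(epoch))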
Code example #2
File: core.py Project: Duplums/pynet
    def training(self, manager: AbstractDataManager, nb_epochs: int, checkpointdir=None,
                 fold_index=None, epoch_index=None,
                 scheduler=None, with_validation=True, with_visualization=False,
                 nb_epochs_per_saving=1, exp_name=None, standard_optim=True,
                 gpu_time_profiling=False, **kwargs_train):
        """ Train the model.

        Parameters
        ----------
        manager: a pynet DataManager
            a manager containing the train and validation data.
        nb_epochs: int
            the number of training epochs.
        checkpointdir: str, default None
            a destination folder where intermediate models/histories will be
            saved.
        fold_index: int or list of int, default None
            the index(es) of the fold(s) to use for training; by default all
            the available folds are used.
        epoch_index: int, default None
            the epoch number from which counting starts (an offset added to
            the epoch counter when logging and saving, e.g. when resuming a
            run).
        scheduler: torch.optim.lr_scheduler, default None
            a scheduler used to reduce the learning rate.
        with_validation: bool, default True
            if set, evaluate on the validation dataset after each epoch.
        with_visualization: bool, default False
            if set, plot the losses/metrics/images in a web app during
            training.
        nb_epochs_per_saving: int, default 1
            the number of epochs between model/optimizer checkpoints.
        exp_name: str, default None
            the experiment name, used to tag histories and checkpoints.
        standard_optim: bool, default True
            whether to use the standard optimization loop (forwarded to
            train() and test()).
        gpu_time_profiling: bool, default False
            if set, profile the GPU time during training (forwarded to
            train()).
        kwargs_train: dict
            extra keyword arguments forwarded to train() and test().

        Returns
        -------
        train_history, valid_history: History
            the train/validation history.
        """

        train_history = History(name="Train_%s"%(exp_name or ""))
        if with_validation is not None:
            valid_history = History(name="Validation_%s"%(exp_name or ""))
        else:
            valid_history = None
        train_visualizer, valid_visualizer = None, None
        if with_visualization:
            train_visualizer = Visualizer(train_history)
            if with_validation:
                valid_visualizer = Visualizer(valid_history, offset_win=10)
        print(self.loss)
        print(self.optimizer)
        folds = range(manager.get_nb_folds())
        if fold_index is not None:
            if isinstance(fold_index, int):
                folds = [fold_index]
            elif isinstance(fold_index, list):
                folds = fold_index
        if epoch_index is None:
            epoch_index = 0
        init_optim_state = deepcopy(self.optimizer.state_dict())
        init_model_state = deepcopy(self.model.state_dict())
        if scheduler is not None:
            init_scheduler_state = deepcopy(scheduler.state_dict())
        for fold in folds:
            # Initialize everything before optimizing on a new fold
            self.optimizer.load_state_dict(init_optim_state)
            self.model.load_state_dict(init_model_state)
            if scheduler is not None:
                scheduler.load_state_dict(init_scheduler_state)
            loader = manager.get_dataloader(
                train=True,
                validation=with_validation,
                fold_index=fold)
            for epoch in range(nb_epochs):
                self.notify_observers("before_epoch", epoch=epoch, fold=fold)
                loss, values = self.train(loader.train, train_visualizer, fold, epoch,
                                          standard_optim=standard_optim,
                                          gpu_time_profiling=gpu_time_profiling, **kwargs_train)

                train_history.log((fold, epoch+epoch_index), loss=loss, **values)
                train_history.summary()
                if scheduler is not None:
                    scheduler.step()
                    # get_last_lr() replaces the deprecated get_lr() call
                    print('Scheduler lr: {}'.format(scheduler.get_last_lr()), flush=True)
                    print('Optimizer lr: %f' % self.optimizer.param_groups[0]['lr'], flush=True)
                if checkpointdir is not None and (epoch % nb_epochs_per_saving == 0 or epoch == nb_epochs-1) \
                        and epoch > 0:
                    checkpoint(
                        model=self.model,
                        epoch=epoch+epoch_index,
                        fold=fold,
                        outdir=checkpointdir,
                        name=exp_name,
                        optimizer=self.optimizer)
                    train_history.save(
                        outdir=checkpointdir,
                        epoch=epoch+epoch_index,
                        fold=fold)
                if with_validation:
                    _, _, _, loss, values = self.test(loader.validation,
                                                      standard_optim=standard_optim, **kwargs_train)
                    valid_history.log((fold, epoch+epoch_index), validation_loss=loss, **values)
                    valid_history.summary()
                    if valid_visualizer is not None:
                        valid_visualizer.refresh_current_metrics()
                    if checkpointdir is not None and (epoch % nb_epochs_per_saving == 0 or epoch == nb_epochs-1) \
                            and epoch > 0:
                        valid_history.save(
                            outdir=checkpointdir,
                            epoch=epoch+epoch_index,
                            fold=fold)
                self.notify_observers("after_epoch", epoch=epoch, fold=fold)
        return train_history, valid_history
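
A minimal call sketch for this training() variant follows; the interface object `dl`, the data `manager` and the hyper-parameter values are assumptions. Note that this version calls scheduler.step() without a metric, so a step-based scheduler such as StepLR fits, while ReduceLROnPlateau would not receive its loss here.

# Sketch under stated assumptions, not taken from the source.
from torch.optim import lr_scheduler

scheduler = lr_scheduler.StepLR(dl.optimizer, step_size=10, gamma=0.5)
train_history, valid_history = dl.training(
    manager=manager,
    nb_epochs=50,
    checkpointdir="/tmp/pynet",
    scheduler=scheduler,
    with_validation=True,
    nb_epochs_per_saving=5,
    exp_name="demo")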
Code example #3
    def training(self,
                 manager,
                 nb_epochs,
                 checkpointdir=None,
                 fold_index=None,
                 scheduler=None,
                 with_validation=True,
                 save_after_epochs=1,
                 add_labels=False):
        """ Train the model.

        Parameters
        ----------
        manager: a pynet DataManager
            a manager containing the train and validation data.
        nb_epochs: int
            the number of training epochs.
        checkpointdir: str, default None
            a destination folder where intermediate models/histories will be
            saved.
        fold_index: int, default None
            the index of the fold to use for training; by default all the
            available folds are used.
        scheduler: torch.optim.lr_scheduler, default None
            a scheduler used to reduce the learning rate.
        with_validation: bool, default True
            if set, evaluate on the validation dataset after each epoch.
        save_after_epochs: int, default 1
            the number of epochs between checkpoint saves.

        Returns
        -------
        train_history, valid_history: History
            the train/validation history.
        """
        if (scheduler is not None and self.resume
                and "scheduler" in self.checkpoint):
            scheduler.load_state_dict(self.checkpoint["scheduler"])
        if checkpointdir is not None and not os.path.isdir(checkpointdir):
            os.mkdir(checkpointdir)
        train_history = History(name="train")
        if with_validation:
            valid_history = History(name="validation")
        else:
            valid_history = None
        logger.info("Loss function {0}.".format(self.loss))
        logger.info("Optimizer function {0}.".format(self.optimizer))
        folds = range(manager.number_of_folds)
        if fold_index is not None:
            folds = [fold_index]
        for fold in folds:
            logger.debug("Running fold {0}...".format(fold))
            reset_weights(self.model, self.checkpoint)
            loaders = manager.get_dataloader(train=True,
                                             validation=with_validation,
                                             fold_index=fold)
            for epoch in range(nb_epochs):
                logger.debug("Running epoch {0}:".format(fold))
                logger.debug("  notify observers with signal 'before_epoch'.")
                self.notify_observers("before_epoch", epoch=epoch, fold=fold)
                observers_kwargs = {}
                logger.debug("  train.")
                loss, values = self.train(loaders.train)
                observers_kwargs["loss"] = loss
                observers_kwargs.update(values)
                if scheduler is not None:
                    logger.debug("  update scheduler.")
                    scheduler.step(loss)
                logger.debug("  update train history.")
                train_history.log((fold, epoch), loss=loss, **values)
                train_history.summary()
                if (checkpointdir is not None
                        and epoch % save_after_epochs == 0):
                    logger.debug("  create checkpoint.")
                    checkpoint(model=self.model,
                               epoch=epoch,
                               fold=fold,
                               outdir=checkpointdir,
                               optimizer=self.optimizer,
                               scheduler=scheduler)
                    train_history.save(outdir=checkpointdir,
                                       epoch=epoch,
                                       fold=fold)
                if with_validation:
                    logger.debug("  validation.")
                    y_pred, loss, values = self.test(loaders.validation)
                    observers_kwargs["val_loss"] = loss
                    observers_kwargs.update(
                        dict(("val_{0}".format(key), val)
                             for key, val in values.items()))
                    observers_kwargs["val_pred"] = y_pred
                    logger.debug("  update validation history.")
                    valid_history.log((fold, epoch), loss=loss, **values)
                    valid_history.summary()
                    if (checkpointdir is not None
                            and epoch % save_after_epochs == 0):
                        logger.debug("  create checkpoint.")
                        valid_history.save(outdir=checkpointdir,
                                           epoch=epoch,
                                           fold=fold)
                logger.debug("  notify observers with signal 'after_epoch'.")
                self.notify_observers("after_epoch",
                                      epoch=epoch,
                                      fold=fold,
                                      **observers_kwargs)
                logger.debug("End epoch.".format(fold))
            logger.debug("End fold.")
        return train_history, valid_history
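
This variant forwards each epoch's results to observers through notify_observers. A sketch of a compatible callback follows; the add_observer registration call and the `interface` object are assumptions, and only the payload keys are taken from the "after_epoch" call above.

# Hypothetical observer: receives the keyword payload of
# notify_observers("after_epoch", ...).
def log_epoch(epoch=None, fold=None, loss=None, val_loss=None, **kwargs):
    print("fold {0}, epoch {1}: loss={2}, val_loss={3}".format(
        fold, epoch, loss, val_loss))

interface.add_observer("after_epoch", log_epoch)  # registration name assumed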
Code example #4
    "num_classes": 4,
    "activation": "relu",
    "normalization": "group_normalization",
    "mode": "trilinear",
    "with_vae": True,
    "debug": False
}
if os.path.isfile(trained_model):
    nvnet = NvNetSegmenter(nvnet_kwargs,
                           optimizer_name="Adam",
                           learning_rate=1e-4,
                           weight_decay=1e-5,
                           loss=my_loss,
                           pretrained=trained_model,
                           use_cuda=True)
    train_history = History.load(os.path.join(outdir, "train_0_epoch_9.pkl"))
    valid_history = History.load(
        os.path.join(outdir, "validation_0_epoch_9.pkl"))
else:
    nvnet = NvNetSegmenter(nvnet_kwargs,
                           optimizer_name="Adam",
                           learning_rate=1e-4,
                           weight_decay=1e-5,
                           loss=my_loss,
                           use_cuda=True)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer=nvnet.optimizer,
                                               mode="min",
                                               factor=0.5,
                                               patience=5)
    train_history, valid_history = nvnet.training(
        manager=manager,
Code example #5
                model=net,
                metrics=["accuracy"])
test_history, train_history = cl.training(manager=manager,
                                          nb_epochs=3,
                                          checkpointdir="/tmp/pynet",
                                          fold_index=0,
                                          with_validation=True)

#############################################################################
# You can reload the optimization history at any time and any step.

from pprint import pprint
from pynet.history import History
from pynet.plotting import plot_history

history = History.load("/tmp/pynet/train_0_epoch_2.pkl")
print(history)
plot_history(history)
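
# Note: the filename encodes the fold index and the epoch
# (train_0_epoch_2.pkl is fold 0, epoch 2), which is what makes reloading
# at an arbitrary step possible.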

#############################################################################
# And now predict the labels on the test set.

import numpy as np
from sklearn.metrics import classification_report
from pynet.plotting import plot_data

y_pred, X, y_true, loss, values = cl.testing(manager=manager,
                                             with_logit=True,
                                             predict=True)
pprint(data.labels)
print(classification_report(y_true, y_pred, target_names=data.labels.values()))
Code example #6
                                            pb=pb,
                                            hyper=hyper,
                                            n_finetuning=n_finetuning,
                                            block=b,
                                            f=fold,
                                            e=e))) for fold in range(folds)
                ]
            else:
                if CV:
                    if pb == 'Alzheimer':
                        filename = "Validation_DenseNet_{pb}_{db}_CV_%i_epoch_{e}.pkl"
                    else:
                        filename = "Validation_DenseNet_{pb}_{db}_%i_epoch_{e}.pkl"
                    results[db][name][n_finetuning] = History.load(
                        os.path.join(root, path.format(n=N_pretraining,
                                                       n_finetune=n_finetuning, pb=pb), filename.
                                     format(db=db, pb=pb, e=e)), folds=list(range(folds))). \
                        to_dict(patterns_to_del=patterns_to_del)
                else:
                    filename = "Test_DenseNet_{pb}_{db}_fold{f}_epoch{e}.pkl"
                    results[db][name][n_finetuning] = [
                        get_pickle_obj(
                            os.path.join(
                                root,
                                path.format(n=N_pretraining,
                                            n_finetune=n_finetuning,
                                            pb=pb),
                                filename.format(db=db, pb=pb, f=fold, e=e)))
                        for fold in range(folds)
                    ]
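
The get_pickle_obj helper used above is not shown in the snippet; a minimal sketch, assuming it simply unpickles the file at the given path:

import pickle

def get_pickle_obj(path):
    # Assumed behavior: return the object pickled at `path`.
    with open(path, "rb") as handle:
        return pickle.load(handle)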
Code example #7
    def training(self,
                 manager,
                 nb_epochs,
                 checkpointdir=None,
                 fold_index=None,
                 scheduler=None,
                 with_validation=True):
        """ Train the model.

        Parameters
        ----------
        manager: a pynet DataManager
            a manager containing the train and validation data.
        nb_epochs: int
            the number of training epochs.
        checkpointdir: str, default None
            a destination folder where intermediate models/histories will be
            saved.
        fold_index: int, default None
            the index of the fold to use for training; by default all the
            available folds are used.
        scheduler: torch.optim.lr_scheduler, default None
            a scheduler used to reduce the learning rate.
        with_validation: bool, default True
            if set, evaluate on the validation dataset after each epoch.

        Returns
        -------
        train_history, valid_history: History
            the train/validation history.
        """
        if checkpointdir is not None and not os.path.isdir(checkpointdir):
            os.mkdir(checkpointdir)
        train_history = History(name="train")
        if with_validation:
            valid_history = History(name="validation")
        else:
            valid_history = None
        print(self.loss)
        print(self.optimizer)
        folds = range(manager.number_of_folds)
        if fold_index is not None:
            folds = [fold_index]
        for fold in folds:
            reset_weights(self.model)
            loaders = manager.get_dataloader(train=True,
                                             validation=with_validation,
                                             fold_index=fold)
            for epoch in range(nb_epochs):
                self.notify_observers("before_epoch", epoch=epoch, fold=fold)
                observers_kwargs = {}
                loss, values = self.train(loaders.train)
                observers_kwargs["loss"] = loss
                observers_kwargs.update(values)
                if scheduler is not None:
                    scheduler.step(loss)
                train_history.log((fold, epoch), loss=loss, **values)
                train_history.summary()
                if checkpointdir is not None:
                    checkpoint(model=self.model,
                               epoch=epoch,
                               fold=fold,
                               outdir=checkpointdir,
                               optimizer=self.optimizer)
                    train_history.save(outdir=checkpointdir,
                                       epoch=epoch,
                                       fold=fold)
                if with_validation:
                    _, loss, values = self.test(loaders.validation)
                    observers_kwargs["val_loss"] = loss
                    observers_kwargs.update(
                        dict(("val_{0}".format(key), val)
                             for key, val in values.items()))
                    valid_history.log((fold, epoch), loss=loss, **values)
                    valid_history.summary()
                    if checkpointdir is not None:
                        valid_history.save(outdir=checkpointdir,
                                           epoch=epoch,
                                           fold=fold)
                self.notify_observers("after_epoch",
                                      epoch=epoch,
                                      fold=fold,
                                      **observers_kwargs)
        return train_history, valid_history
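
reset_weights is a pynet helper not shown in this snippet; a plausible minimal sketch, assuming it re-initializes every submodule that exposes reset_parameters so each fold starts from fresh weights:

# Sketch under stated assumptions, not the library's actual implementation.
def reset_weights(model):
    for module in model.modules():
        if hasattr(module, "reset_parameters"):
            module.reset_parameters()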
Code example #8
test_history, train_history, valid_history = training(net=net,
                                                      dataset=dataset,
                                                      optimizer=optim.Adam(
                                                          net.parameters(),
                                                          lr=0.01),
                                                      criterion=my_loss,
                                                      nb_epochs=3,
                                                      metrics={"mse": my_loss},
                                                      use_cuda=False,
                                                      outdir="/tmp/pynet",
                                                      verbose=1)

#############################################################################
# You can reload the optimization history at any time and any step.

from pprint import pprint
from pynet.history import History

valid_history = History.load("/tmp/pynet/history/valid_1_epoch_3.pkl")
pprint(valid_history.history)
pprint(valid_history["loss"])

#############################################################################
# You can finally display the optimization cost.

from pynet.plotting import plot_data

x, y = valid_history["loss"]
plot_data(y)
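
#############################################################################
# If plot_data is unavailable, the same curve can be drawn directly with
# matplotlib (a sketch; the (x, y) unpacking follows the History indexing
# shown above).

import matplotlib.pyplot as plt

steps, losses = valid_history["loss"]
plt.plot(losses)
plt.xlabel("step")
plt.ylabel("validation loss")
plt.show()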