Example #1
    def training(self,
                 manager,
                 nb_epochs,
                 checkpointdir=None,
                 fold_index=None,
                 scheduler=None,
                 with_validation=True,
                 save_after_epochs=1,
                 add_labels=False):
        """ Train the model.

        Parameters
        ----------
        manager: a pynet DataManager
            a manager containing the train and validation data.
        nb_epochs: int
            the number of epochs.
        checkpointdir: str, default None
            a destination folder where intermediate models/histories will be
            saved.
        fold_index: int, default None
            the index of the fold to use for the training; by default all the
            available folds are used.
        scheduler: torch.optim.lr_scheduler, default None
            a scheduler used to reduce the learning rate.
        with_validation: bool, default True
            if set, use the validation dataset.
        save_after_epochs: int, default 1
            the number of epochs between two successive checkpoints of the
            model and training history.

        Returns
        -------
        train_history, valid_history: History
            the train/validation history.
        """
        if (self.resume and scheduler is not None
                and "scheduler" in self.checkpoint):
            scheduler.load_state_dict(self.checkpoint["scheduler"])
        if checkpointdir is not None and not os.path.isdir(checkpointdir):
            os.mkdir(checkpointdir)
        train_history = History(name="train")
        if with_validation:
            valid_history = History(name="validation")
        else:
            valid_history = None
        logger.info("Loss function {0}.".format(self.loss))
        logger.info("Optimizer function {0}.".format(self.optimizer))
        folds = range(manager.number_of_folds)
        if fold_index is not None:
            folds = [fold_index]
        for fold in folds:
            logger.debug("Running fold {0}...".format(fold))
            reset_weights(self.model, self.checkpoint)
            loaders = manager.get_dataloader(train=True,
                                             validation=with_validation,
                                             fold_index=fold)
            for epoch in range(nb_epochs):
                logger.debug("Running epoch {0}:".format(fold))
                logger.debug("  notify observers with signal 'before_epoch'.")
                self.notify_observers("before_epoch", epoch=epoch, fold=fold)
                observers_kwargs = {}
                logger.debug("  train.")
                loss, values = self.train(loaders.train)
                observers_kwargs["loss"] = loss
                observers_kwargs.update(values)
                if scheduler is not None:
                    logger.debug("  update scheduler.")
                    scheduler.step(loss)
                logger.debug("  update train history.")
                train_history.log((fold, epoch), loss=loss, **values)
                train_history.summary()
                if (checkpointdir is not None
                        and epoch % save_after_epochs == 0):
                    logger.debug("  create checkpoint.")
                    checkpoint(model=self.model,
                               epoch=epoch,
                               fold=fold,
                               outdir=checkpointdir,
                               optimizer=self.optimizer,
                               scheduler=scheduler)
                    train_history.save(outdir=checkpointdir,
                                       epoch=epoch,
                                       fold=fold)
                if with_validation:
                    logger.debug("  validation.")
                    y_pred, loss, values = self.test(loaders.validation)
                    observers_kwargs["val_loss"] = loss
                    observers_kwargs.update(
                        dict(("val_{0}".format(key), val)
                             for key, val in values.items()))
                    observers_kwargs["val_pred"] = y_pred
                    logger.debug("  update validation history.")
                    valid_history.log((fold, epoch), loss=loss, **values)
                    valid_history.summary()
                    if (checkpointdir is not None
                            and epoch % save_after_epochs == 0):
                        logger.debug("  create checkpoint.")
                        valid_history.save(outdir=checkpointdir,
                                           epoch=epoch,
                                           fold=fold)
                logger.debug("  notify observers with signal 'after_epoch'.")
                self.notify_observers("after_epoch",
                                      epoch=epoch,
                                      fold=fold,
                                      **observers_kwargs)
                logger.debug("End epoch.".format(fold))
            logger.debug("End fold.")
        return train_history, valid_history
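
A minimal usage sketch for the method above, assuming an already-built pynet DataManager (manager) and an instance of the class that defines training() (interface); only the training() signature comes from the example. Because the epoch loop calls scheduler.step(loss) with the epoch loss, a metric-driven scheduler such as torch.optim.lr_scheduler.ReduceLROnPlateau is a natural fit.

# Hypothetical usage sketch: manager and interface are assumptions,
# not part of the example above.
from torch.optim.lr_scheduler import ReduceLROnPlateau

scheduler = ReduceLROnPlateau(interface.optimizer, mode="min", factor=0.5,
                              patience=5)
train_history, valid_history = interface.training(
    manager=manager,
    nb_epochs=100,
    checkpointdir="/tmp/checkpoints",
    fold_index=0,
    scheduler=scheduler,
    with_validation=True,
    save_after_epochs=5)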
Example #2
    def training(self,
                 manager,
                 nb_epochs,
                 checkpointdir=None,
                 fold_index=None,
                 scheduler=None,
                 with_validation=True):
        """ Train the model.

        Parameters
        ----------
        manager: a pynet DataManager
            a manager containing the train and validation data.
        nb_epochs: int
            the number of epochs.
        checkpointdir: str, default None
            a destination folder where intermediate models/histories will be
            saved.
        fold_index: int, default None
            the index of the fold to use for the training; by default all the
            available folds are used.
        scheduler: torch.optim.lr_scheduler, default None
            a scheduler used to reduce the learning rate.
        with_validation: bool, default True
            if set, use the validation dataset.

        Returns
        -------
        train_history, valid_history: History
            the train/validation history.
        """
        if checkpointdir is not None and not os.path.isdir(checkpointdir):
            os.mkdir(checkpointdir)
        train_history = History(name="train")
        if with_validation:
            valid_history = History(name="validation")
        else:
            valid_history = None
        print(self.loss)
        print(self.optimizer)
        folds = range(manager.number_of_folds)
        if fold_index is not None:
            folds = [fold_index]
        for fold in folds:
            reset_weights(self.model)
            loaders = manager.get_dataloader(train=True,
                                             validation=with_validation,
                                             fold_index=fold)
            for epoch in range(nb_epochs):
                self.notify_observers("before_epoch", epoch=epoch, fold=fold)
                observers_kwargs = {}
                loss, values = self.train(loaders.train)
                observers_kwargs["loss"] = loss
                observers_kwargs.update(values)
                if scheduler is not None:
                    scheduler.step(loss)
                train_history.log((fold, epoch), loss=loss, **values)
                train_history.summary()
                if checkpointdir is not None:
                    checkpoint(model=self.model,
                               epoch=epoch,
                               fold=fold,
                               outdir=checkpointdir,
                               optimizer=self.optimizer)
                    train_history.save(outdir=checkpointdir,
                                       epoch=epoch,
                                       fold=fold)
                if with_validation:
                    _, loss, values = self.test(loaders.validation)
                    observers_kwargs["val_loss"] = loss
                    observers_kwargs.update(
                        dict(("val_{0}".format(key), val)
                             for key, val in values.items()))
                    valid_history.log((fold, epoch), loss=loss, **values)
                    valid_history.summary()
                    if checkpointdir is not None:
                        valid_history.save(outdir=checkpointdir,
                                           epoch=epoch,
                                           fold=fold)
                self.notify_observers("after_epoch",
                                      epoch=epoch,
                                      fold=fold,
                                      **observers_kwargs)
        return train_history, valid_history
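
Both examples re-initialize the model with reset_weights before each fold. The pynet implementation is not shown here (in Example #1 it also receives a checkpoint), so the sketch below is only an assumption of what such a helper typically does: restore weights from a checkpoint when one is given, otherwise re-run reset_parameters() on every submodule that exposes it.

# Illustrative sketch only; the actual pynet reset_weights may differ.
def reset_weights_sketch(model, checkpoint=None):
    if checkpoint is not None and "model" in checkpoint:
        # Resume case: restore the stored weights.
        model.load_state_dict(checkpoint["model"])
        return
    for module in model.modules():
        # Re-initialize every layer that knows how to reset itself.
        if hasattr(module, "reset_parameters"):
            module.reset_parameters()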
Example #3
# OK it's not working, let's try different transfer learning strategies.

cl = classifier.ResNet18(
    num_classes=1000,
    pretrained="/neurospin/nsap/torch/models/resnet18-5c106cde.pth",
    batch_size=50,
    optimizer_name="Adam",
    learning_rate=1e-4,
    loss_name="NLLLoss",
    metrics=["accuracy"])
to_freeze_layers = ["conv1", "bn1", "relu", "maxpool", "layer1", "layer2"]
freeze_layers(cl.model, to_freeze_layers)
nb_features = cl.model.fc.in_features
cl.model.fc = nn.Linear(nb_features, 9)
train(cl, dataset)

cl = classifier.ResNet18(
    num_classes=1000,
    pretrained="/neurospin/nsap/torch/models/resnet18-5c106cde.pth",
    batch_size=50,
    optimizer_name="Adam",
    learning_rate=1e-4,
    loss_name="NLLLoss",
    metrics=["accuracy"])
reset_weights(cl.model)
nb_features = cl.model.fc.in_features
cl.model.fc = nn.Linear(nb_features, 9)
train(cl, dataset)

plt.show()
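
The snippet compares two transfer-learning strategies on the same ResNet18: keep the pretrained weights and freeze the early blocks, or reset all weights and train from scratch, in both cases with a new 9-class fully connected head. freeze_layers is not defined in the excerpt; the sketch below assumes it simply disables gradients for the named top-level children, which is the usual pattern.

# Assumed behaviour of freeze_layers: stop gradient updates for the listed
# top-level children so only the remaining layers and the new head are trained.
def freeze_layers_sketch(model, layer_names):
    for name, child in model.named_children():
        if name in layer_names:
            for param in child.parameters():
                param.requires_grad = False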
Example #4
    for l in target_layers
]

## Compute and store the outputs of each network on the whole test set
for (exp, pretrained, fine_tuned) in zip(experiments, pretrained_paths,
                                         fine_tuned_models):
    outputs = {
        "outputs":
        {layer: {
            "before": [],
            "after": []
        }
         for layer in target_layers},
        "labels": []
    }
    reset_weights(net1)
    reset_weights(net2)
    if pretrained is not None:
        net1.load_state_dict(torch.load(pretrained)['model'], strict=False)
    net2.load_state_dict(torch.load(fine_tuned)['model'], strict=False)
    print("Pretrained path: {}, Fine-tuned path: {}\n".format(
        pretrained, fine_tuned),
          flush=True)
    pbar = tqdm(total=len(loaders.test), desc="Mini-Batch")
    for it, dataitem in enumerate(loaders.test):
        pbar.update()
        inputs = dataitem.inputs.to(device)
        net1(inputs)
        net2(inputs)
        for i, l in enumerate(target_layers):
            outputs["outputs"][l]["before"].extend(