Example #1
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
import torch
from torch.utils.tensorboard import SummaryWriter


def generate_confusion_matrix(targets: torch.Tensor,
                              predictions: torch.Tensor,
                              writer: SummaryWriter = None) -> None:
    """Generates a confusion matrix and adds it to the summary writer or shows the plot

    Args:
        targets (torch.Tensor): Tensor of targets
        predictions (torch.Tensor): Tensor of predictions
        writer (Writer.SummaryWriter, optional): Summarywriter. Defaults to None.
    """
    data = {'targets': targets, 'predictions': predictions}
    df = pd.DataFrame(data)
    # Cross-tabulate actual vs. predicted labels, with totals in the margins.
    confusion_matrix = pd.crosstab(df['targets'],
                                   df['predictions'],
                                   rownames=['Actual'],
                                   colnames=['Predicted'],
                                   margins=True)

    # Render the confusion matrix as an annotated heatmap.
    sn.heatmap(confusion_matrix, fmt="d", annot=True)

    if writer is not None:
        figure = plt.gcf()
        writer.add_figure('Test/confusion matrix', figure)
    else:
        # add_figure closes the figure by default, so only show the plot
        # when no writer was given.
        plt.show()
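
A minimal usage sketch, assuming 1-D integer label tensors; the values and the writer setup below are illustrative, not part of the original example:

targets = torch.tensor([0, 1, 2, 1, 0, 2])
predictions = torch.tensor([0, 2, 2, 1, 0, 1])
generate_confusion_matrix(targets, predictions)  # no writer: shows the plot

writer = SummaryWriter()
generate_confusion_matrix(targets, predictions, writer=writer)  # logs to TensorBoard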
Example #2
import os
import tempfile
from math import ceil, sqrt
from typing import Union

import matplotlib.pyplot as plt
import mlflow
import torch
from torch.utils.tensorboard import SummaryWriter


def addCodesGrid(
    inputImages: Union[torch.Tensor, list],
    codes: Union[torch.Tensor, list],
    tag: str,
    step: int,
    labels: Union[torch.Tensor, list] = None,
    writer: SummaryWriter = None,
    mlflowFile: str = None,
):

    # getTensorList and genTitle are helpers from the original source (not shown here).
    images = getTensorList(inputImages)
    fCodes = getTensorList(codes)
    nImages = len(images)
    # Lay the images out on the smallest square grid that fits them all.
    rows = int(ceil(sqrt(nImages)))
    if labels is not None:
        _labels = getTensorList(labels.cpu())
    else:
        _labels = None

    gridSize = rows**2
    figure = plt.figure(figsize=(10, 10))
    for imgIdx, img in enumerate(images):
        fCode = fCodes[imgIdx]
        if _labels is not None:
            label = _labels[imgIdx]
        else:
            label = None
        title = genTitle(fCode, label)
        plt.subplots_adjust(wspace=1.0, hspace=1.5)
        plt.subplot(rows, rows, imgIdx + 1, title=title)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        # CHW -> HWC for matplotlib; clamp values to the displayable [0, 1] range.
        pltImg = img.permute(1, 2, 0).squeeze().numpy().clip(0, 1)
        if img.shape[0] == 3:
            plt.imshow(pltImg)
        else:
            plt.imshow(pltImg, cmap=plt.cm.binary)
        if imgIdx + 1 >= gridSize:  # stop once the grid is full
            break

    # mlflow takes precedence over the TensorBoard writer; both cannot be used together.
    if writer is not None and mlflowFile is None:
        writer.add_figure(tag, figure, step)

    if mlflowFile is not None:
        with tempfile.NamedTemporaryFile(prefix=mlflowFile + "_",
                                         suffix=".png") as f:
            plt.savefig(f.name, bbox_inches="tight", format="png")
            os.sync()
            mlflow.log_artifact(f.name)
    plt.close(figure)
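
A hypothetical call, assuming a batch of RGB images with one feature code per image; the shapes are illustrative, and the behavior depends on the getTensorList and genTitle helpers from the original source:

imgs = torch.rand(16, 3, 32, 32)  # 16 RGB images -> a 4x4 grid
codes = torch.rand(16, 4)         # one feature code per image
addCodesGrid(imgs, codes, tag="val/codes", step=0, writer=SummaryWriter())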
Example #3
    # Training-loop excerpt: imports and the model, config, dataloader, and
    # device objects are assumed to be defined by the surrounding code
    # (see the sketch after this example).
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=config['parameters']['lr'])
    criterion = torch.nn.NLLLoss()

    writer = SummaryWriter()
    best_loss = np.inf
    i = 0

    for e in range(config['parameters']['epochs']):
        for imgs, labels in tqdm(dataloader):
            imgs, labels = imgs.to(device), labels.to(device)

            optimizer.zero_grad()
            out = model(imgs)
            loss = criterion(out, labels)
            loss.backward()
            optimizer.step()

            if loss.item() < best_loss:
                best_loss = loss.item()
                torch.save(model.state_dict(), 'best_model.pt')

            fig = plt.figure()
            plt.title('Probability Apple {:.3f}/Banana {:.3f}'.format(
                torch.exp(out[0][0]).item(), torch.exp(out[0][1]).item()))
            # CHW -> HWC so matplotlib displays the image correctly.
            plt.imshow(imgs[0].cpu().numpy().transpose(1, 2, 0))
            writer.add_figure('probability', fig, i)

            writer.add_scalar('train/loss', loss.item(), i)
            i += 1
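
The excerpt assumes model, config, dataloader, and device come from the surrounding code; since torch.exp(out) and NLLLoss imply log-probability outputs, the model presumably ends in LogSoftmax. A minimal sketch of that context, with every name and value illustrative:

import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter

config = {'parameters': {'lr': 1e-3, 'epochs': 5}}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Stand-in two-class classifier producing log-probabilities, as NLLLoss expects.
model = torch.nn.Sequential(
    torch.nn.Flatten(),
    torch.nn.Linear(3 * 64 * 64, 2),
    torch.nn.LogSoftmax(dim=1),
).to(device)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)  # dataset: a hypothetical torch Dataset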
Example #4
from torch.utils.tensorboard import SummaryWriter


class TensorBoardLogger(TrainingLogger):  # TrainingLogger: base class from the original source
    """
    Logger that also logs information to TensorBoard.
    """

    def __init__(
        self, n_epochs, log_rate=100, log_directory=None, epoch_begin_callback=None
    ):
        """
        Create a new logger instance.

        Args:
            n_epochs: The number of epochs for which the training will last.
            log_rate: The message rate for output to standard out.
            log_directory: The directory to use for tensorboard output.
            epoch_begin_callback: Callback function the will be called with
                arguments ``writer, model``, where ``writer`` is the current
                ``torch.utils.tensorboard.writer.SummaryWriter`` object used
                used to write output and ``model`` is the model that is being
                in its current state.
        """
        super().__init__(n_epochs, log_rate)
        self.writer = SummaryWriter(log_dir=log_directory)
        self.epoch_begin_callback = epoch_begin_callback
        self.attributes = None

    def set_attributes(self, attributes):
        """
        Stores attributes that describe the training in the logger.
        These will be stored in the logger history.

        Args:
            attributes: Dictionary of attributes to store in the history of
                the logger.
        """
        super().set_attributes(attributes)

    def epoch_begin(self, model):
        """
        Called at the beginning of each epoch.

        Args:
            model: The model that is being trained, in its current state.
        """
        TrainingLogger.epoch_begin(self, model)
        if self.epoch_begin_callback:
            self.epoch_begin_callback(self.writer, model, self.i_epoch)

    def training_step(self, loss, n_samples, of=None, losses=None):
        """
        Log processing of a training batch. This method should be called
        after each batch is processed so that the logger can keep track
        of training progress.

        Args:
            loss: The loss of the current batch.
            n_samples: The number of samples in the batch.
            of: If available, the number of batches in the epoch.
            losses: Optional dictionary of individual losses, forwarded to
                the base class.
        """
        super().training_step(loss, n_samples, of=of, losses=losses)

    def validation_step(self, loss, n_samples, of=None, losses=None):
        """
        Log processing of a validation batch.

        Args:
            loss: The loss of the current batch.
            n_samples: The number of samples in the batch.
            of: If available, the number of batches in the epoch.
            losses: Optional dictionary of individual losses, forwarded to
                the base class.
        """
        super().validation_step(loss, n_samples, of=of, losses=losses)

    def epoch(self, learning_rate=None, metrics=None):
        """
        Log processing of epoch.

        Args:
            learning_rate: If available, the learning rate of the optimizer.
            metrics: Optional list of metric objects; any metric exposing
                ``get_figures`` has its figures logged below.
        """
        TrainingLogger.epoch(self, learning_rate, metrics=metrics)
        if learning_rate is not None:
            self.writer.add_scalar("Learning rate", learning_rate, self.i_epoch)

        # Log the most recent value of every scalar history variable.
        for name, v in self.history.variables.items():
            if name == "epochs":
                continue
            if len(v.dims) == 1:
                value = v.data[-1]
                self.writer.add_scalar(name, value, self.i_epoch)

        if metrics is not None:
            for m in metrics:
                if hasattr(m, "get_figures"):
                    figures = m.get_figures()
                    if isinstance(figures, dict):
                        for target, f in figures.items():
                            self.writer.add_figure(
                                f"{m.name} ({target})", f, self.i_epoch
                            )
                    else:
                        self.writer.add_figure(f"{m.name}", figures, self.i_epoch)

    def training_end(self):
        """
        Called to signal the end of the training to the logger.
        """
        if self.attributes is not None:
            if self.i_epoch >= self.n_epochs:
                # Collect the final value of each scalar history variable and
                # log it alongside the training attributes as hparams.
                metrics = {}
                for name, v in self.history.variables.items():
                    if name == "epochs":
                        continue
                    if len(v.dims) == 1:
                        metrics[name + "_final"] = v.data[-1]
                self.writer.add_hparams(self.attributes, metrics)
                self.writer.flush()

    def __del__(self):
        # Delegate cleanup to the base class.
        super().__del__()
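
A hypothetical usage sketch; TrainingLogger and the training driver that calls these hooks come from the original source, and the callback and values below are illustrative:

def log_histograms(writer, model, i_epoch):
    # Log a histogram of every model parameter at the start of each epoch.
    for name, param in model.named_parameters():
        writer.add_histogram(name, param, i_epoch)

logger = TensorBoardLogger(n_epochs=10,
                           log_rate=50,
                           log_directory="runs/experiment_1",
                           epoch_begin_callback=log_histograms)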