Example #1
    def __call__(self, batch):
        """
        Calling a workflow on a data batch makes the prediction output available for further use.

        Given a data batch in a format compatible with Eisen, that is, a dictionary whose keys can be
        matched with the model's input arguments, the workflow can be called on that dictionary and will
        return results.

        .. code-block:: python

            workflow = GenericWorkflow(model, gpu=True)

            batch = {'image': np.random.rand(3, 224, 224)}

            prediction, losses, metrics = workflow(batch)

        :param batch: Data batch to be processed by the workflow
        :type batch: dict

        :return: tuple containing outputs, losses and metrics

        """
        # keep only the batch entries that match the model's input arguments
        model_argument_dict = {
            key: batch[key]
            for key in self.model.input_names
        }

        if self.optimizer is not None:
            # training mode: clear gradients accumulated during the previous iteration
            self.optimizer.zero_grad()

        outputs = self.model(**model_argument_dict)

        losses = self.compute_losses(merge_two_dicts(batch, outputs))

        if self.optimizer is not None:
            # training mode: back-propagate each loss and update the model parameters
            for loss in losses:
                for key in loss.keys():
                    loss[key].backward(retain_graph=True)

            self.optimizer.step()

        metrics = self.compute_metrics(merge_two_dicts(batch, outputs))

        return outputs, losses, metrics
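
All three examples rely on a merge_two_dicts helper to combine the input batch with the model outputs before computing losses and metrics. Its implementation is not shown here; a minimal sketch consistent with how it is used would be a plain dictionary merge:

def merge_two_dicts(first, second):
    """
    Return a new dictionary with the entries of both arguments.
    Keys present in both dictionaries take the value from ``second``.
    """
    merged = dict(first)   # shallow copy of the first dictionary
    merged.update(second)  # entries of the second dictionary override duplicates
    return merged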
Example #2
    def get_output_dictionary(self, batch):
        """
        Calls the class on the batch and converts the output tuple to an output dictionary.
        Uses Automatic Mixed Precision (AMP) as implemented natively by PyTorch (torch.cuda.amp).

        :param batch: Data batch to be processed by the workflow
        :type batch: dict

        :return: output_dictionary
        """
        model_argument_dict = {
            key: batch[key]
            for key in self.model.input_names
        }

        self.optimizer.zero_grad()

        # run the forward pass and the loss computation under autocast so that
        # eligible operations execute in half precision
        with autocast():
            outputs = self.model(**model_argument_dict)
            losses = self.compute_losses(merge_two_dicts(batch, outputs))

        # scale each loss before back-propagation to avoid gradient underflow
        for loss in losses:
            for key in loss.keys():
                self.scaler.scale(loss[key]).backward(retain_graph=True)

        # unscale the gradients, step the optimizer and update the scaling factor
        self.scaler.step(self.optimizer)

        self.scaler.update()

        metrics = self.compute_metrics(merge_two_dicts(batch, outputs))

        output_dictionary = {
            "inputs": batch,
            "outputs": outputs,
            "losses": losses,
            "metrics": metrics,
            "epoch": self.epoch,
            "model": self.model,
        }

        return output_dictionary
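
The variant above assumes that autocast has been imported from torch.cuda.amp and that the workflow owns a gradient scaler, typically created once in its constructor. A minimal setup sketch under those assumptions (the constructor shown here is illustrative, not the actual Eisen code):

import torch
from torch.cuda.amp import autocast, GradScaler  # autocast is used in get_output_dictionary above


class AMPWorkflowSketch:
    # hypothetical constructor: the real Eisen workflow takes additional arguments
    def __init__(self, model, optimizer):
        self.model = model
        self.optimizer = optimizer
        # GradScaler maintains the loss scaling factor used by scale(), step() and update()
        self.scaler = GradScaler()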
Example #3
    def get_output_dictionary(self, batch):
        """
        Calls the class on the batch and converts the output tuple to an output dictionary.
        Uses Automatic Mixed Precision (AMP) as implemented by NVIDIA Apex.

        :param batch: Data batch to be processed by the workflow
        :type batch: dict

        :return: output_dictionary
        """
        model_argument_dict = {key: batch[key] for key in self.model.input_names}

        self.optimizer.zero_grad()

        outputs = self.model(**model_argument_dict)

        losses = self.compute_losses(merge_two_dicts(batch, outputs))

        # scale each loss through Apex before back-propagation to avoid gradient underflow
        for loss in losses:
            for key in loss.keys():
                with amp.scale_loss(loss[key], self.optimizer) as scaled_loss:
                    scaled_loss.backward(retain_graph=True)

        self.optimizer.step()

        metrics = self.compute_metrics(merge_two_dicts(batch, outputs))

        output_dictionary = {
            'inputs': batch,
            'outputs': outputs,
            'losses': losses,
            'metrics': metrics,
            'epoch': self.epoch,
            'model': self.model,
        }

        return output_dictionary
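
The Apex-based variant assumes that the model and optimizer have already been wrapped with amp.initialize, which is what allows amp.scale_loss to rescale losses. A minimal initialization sketch under that assumption (the model, optimizer and opt_level below are placeholders, not part of the original code):

import torch
from apex import amp  # NVIDIA Apex must be installed separately

model = torch.nn.Linear(16, 4).cuda()                      # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)   # placeholder optimizer

# Wrap model and optimizer so that amp.scale_loss can rescale losses during
# the backward pass; "O1" enables mixed precision with automatic casting.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")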