Example 1
    def _compute_model_output_and_loss(self, model_inputs_and_labels: ScalarModelInputsAndLabels) -> \
            Tuple[Tensor, Tensor, Tensor]:
        """
        Computes the output of the model for a given set of inputs and labels.
        Returns a tuple of (logits, posteriors, loss). For multi-GPU computation, the logits are returned
        as a list.
        """
        model = self.train_val_params.model
        label_gpu = self.get_label_tensor(model_inputs_and_labels.labels)
        if self.model_config.use_mixed_precision and self.model_config.use_gpu:
            # Cast the labels to float16 so they match the model's half-precision outputs under autocast.
            label_gpu = label_gpu.to(dtype=torch.float16)

        def compute() -> Tuple[Tensor, Tensor, Tensor]:
            if self.in_training_mode:
                model.train()
                logits, posteriors = self.get_logits_and_posteriors(
                    *model_inputs_and_labels.model_inputs)
            else:
                model.eval()
                with torch.no_grad():
                    logits, posteriors = self.get_logits_and_posteriors(
                        *model_inputs_and_labels.model_inputs)
                model.train()  # restore training mode after the no-grad evaluation pass
            loss = self.compute_loss(logits, label_gpu)
            return logits, posteriors, loss

        return execute_within_autocast_if_needed(
            func=compute, use_autocast=self.model_config.use_mixed_precision)
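
All of the examples on this page funnel their forward pass through execute_within_autocast_if_needed. Its definition is not shown here; below is a minimal sketch of what such a helper could look like, assuming it only needs to run the callable under torch.cuda.amp.autocast when requested. The signature is inferred from the call sites, not copied from the source.

    from typing import Callable, TypeVar

    import torch

    T = TypeVar("T")

    def execute_within_autocast_if_needed(func: Callable[[], T], use_autocast: bool) -> T:
        """Sketch only: run func under autocast when requested, else call it directly."""
        if use_autocast:
            with torch.cuda.amp.autocast():
                return func()
        return func()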
Example 2

    def forward(self, *item: torch.Tensor, **kwargs: Any) -> torch.Tensor:
        def _forward() -> torch.Tensor:
            x = super(ImageEncoderWithMlp, self).forward(*item)
            # pass all the features to the MLP
            x = self.classification_layer(x.view(-1, x.shape[1]))
            return self.final_activation(x)

        return execute_within_autocast_if_needed(func=_forward, use_autocast=self.use_mixed_precision)
Example 3

    def forward(self, *item: torch.Tensor, **kwargs: Any) -> torch.Tensor:
        def _forward() -> torch.Tensor:
            x = item[0]
            x = self.encode_and_aggregate(x)
            # combine non image features if required
            if self.image_and_non_image_features_aggregator:
                x = self.image_and_non_image_features_aggregator(x, item[1].float())
            return x

        return execute_within_autocast_if_needed(func=_forward, use_autocast=self.use_mixed_precision)
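
The image_and_non_image_features_aggregator used in this example is built elsewhere in the class. Purely to illustrate the pattern, a hypothetical aggregator could concatenate the encoded image features with the scalar features and project them back down; everything below (class name, dimensions) is an assumption for illustration, not the library's API.

    import torch
    import torch.nn as nn

    class ConcatAggregator(nn.Module):
        """Hypothetical aggregator: concatenate image and non-image features, then mix."""

        def __init__(self, image_dim: int, non_image_dim: int, output_dim: int) -> None:
            super().__init__()
            self.projection = nn.Linear(image_dim + non_image_dim, output_dim)

        def forward(self, image_features: torch.Tensor, non_image_features: torch.Tensor) -> torch.Tensor:
            # Both inputs are expected to be (batch, features) tensors.
            combined = torch.cat([image_features, non_image_features], dim=1)
            return self.projection(combined)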
Example 4

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # type: ignore
        def _forward() -> torch.Tensor:
            # Copy to a local variable: x is captured from the enclosing forward, and
            # re-assigning it inside this nested function would require a nonlocal declaration.
            x2 = x
            if x2.shape[-3:] != self.expected_image_size_zyx:
                raise ValueError(
                    f"Expected a tensor with trailing size {self.expected_image_size_zyx}, but got "
                    f"{x2.shape}")

            for layer in self._layers:
                x2 = layer(x2)
            x2 = x2.view(x2.size(0), -1)
            x2 = self.fc(x2)
            return self.activation(x2)

        # Models that run inside DataParallel must enable autocast in their own forward pass
        return execute_within_autocast_if_needed(
            _forward, use_autocast=self.use_mixed_precision)
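
The comment in this example reflects a documented PyTorch behaviour: autocast state is thread-local, and torch.nn.DataParallel runs each replica's forward pass in a worker thread, so an autocast region opened in the main thread does not cover the replicas. The fix recommended in the PyTorch AMP docs is to enable autocast inside forward itself, which is what these wrappers accomplish. A minimal sketch of the same idea using the stock decorator (the model and sizes are made up for illustration):

    import torch
    import torch.nn as nn
    from torch.cuda.amp import autocast

    class TinyModel(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.fc = nn.Linear(16, 2)

        @autocast()  # re-enables autocast inside each DataParallel worker thread
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.fc(x)

    # model = nn.DataParallel(TinyModel().cuda())
    # out = model(torch.randn(8, 16, device="cuda"))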
Example 5
    def _compute_loss(
            self, patches: Tensor,
            labels: Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Do a forward pass on the model with the patches as input. If labels are provided, compute the loss.
        Return a tuple of (logits, loss).
        """
        def compute() -> Tuple[Any, Optional[Tensor]]:
            loss: Optional[torch.Tensor] = None
            logits = self.model(patches)
            # If labels is None, loss stays None; callers must be prepared for that case
            # (and mypy correctly flags any use that assumes a Tensor).
            if labels is not None and self.criterion_fn is not None:
                loss = self.criterion_fn(logits, labels)
            return logits, loss

        return execute_within_autocast_if_needed(
            func=compute, use_autocast=self.gradient_scaler is not None)
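
Example 5 switches autocast on only when self.gradient_scaler is set, which suggests the surrounding trainer follows the standard torch.cuda.amp.GradScaler recipe. For context, the canonical scaler-driven training step looks roughly like this (model, optimizer, and criterion are placeholders, not code from this repository):

    import torch
    from torch.cuda.amp import GradScaler, autocast

    def training_step(model: torch.nn.Module,
                      optimizer: torch.optim.Optimizer,
                      scaler: GradScaler,
                      criterion: torch.nn.Module,
                      inputs: torch.Tensor,
                      labels: torch.Tensor) -> torch.Tensor:
        optimizer.zero_grad()
        with autocast():                   # mixed-precision forward pass
            logits = model(inputs)
            loss = criterion(logits, labels)
        scaler.scale(loss).backward()      # scale the loss to avoid fp16 gradient underflow
        scaler.step(optimizer)             # unscale gradients, then run the optimizer step
        scaler.update()                    # adapt the scale factor for the next iteration
        return loss.detach()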