Example #1
    def predict_on_batch(self, data, iterations=1, cuda=False):
        """
        Get the model's prediction on a batch.

        Args:
            data (Tensor): The model input.
            iterations (int): Number of predictions to perform.
            cuda (bool): Use CUDA or not.

        Returns:
            Tensor, the model predictions,
                    shape = [batch_size, n_classes, ..., n_iterations].

        Raises:
            RuntimeError: If CUDA runs out of memory during data replication.
        """
        with torch.no_grad():
            if cuda:
                data = to_cuda(data)
            if self.replicate_in_memory:
                data = map_on_tensor(lambda d: stack_in_memory(d, iterations), data)
                try:
                    out = self.model(data)
                except RuntimeError as e:
                    raise RuntimeError(
                        '''CUDA ran out of memory while BaaL tried to replicate data. See the exception above.
                    Use `replicate_in_memory=False` in order to reduce the memory requirements.
                    Note that there will be some speed trade-offs.''') from e
                out = map_on_tensor(lambda o: o.view([iterations, -1, *o.size()[1:]]), out)
                out = map_on_tensor(lambda o: o.permute(1, 2, *range(3, o.ndimension()), 0), out)
            else:
                out = [self.model(data) for _ in range(iterations)]
                out = _stack_preds(out)
            return out
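A minimal usage sketch for the method above, assuming it belongs to baal's ModelWrapper with the classic (model, criterion) constructor; the toy model, batch, and shapes are illustrative only (without dropout, all iterations return identical predictions).

import torch
from torch import nn
from baal.modelwrapper import ModelWrapper

# Toy classifier over 3x32x32 inputs (illustrative only).
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
wrapper = ModelWrapper(model, criterion=nn.CrossEntropyLoss())

batch = torch.randn(4, 3, 32, 32)                        # [batch_size, C, H, W]
preds = wrapper.predict_on_batch(batch, iterations=20)   # cuda=False by default
print(preds.shape)                                        # expected: torch.Size([4, 10, 20])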
Example #2
def test_stack_in_memory_multi(a_tensor):
    # `a_tensor` is a pytest fixture with batch size 10, i.e. shape (10, 3, 32, 32).
    iterations = 10
    t = [a_tensor, a_tensor]
    out = map_on_tensor(
        lambda ti: array_utils.stack_in_memory(ti, iterations=iterations), t)
    assert out[0].shape == (10 * iterations, 3, 32, 32)
    assert out[1].shape == (10 * iterations, 3, 32, 32)
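The helper map_on_tensor is used throughout these examples but not defined here. The sketch below captures its assumed behaviour: apply a function to every tensor in a (possibly nested) structure while preserving that structure. The name map_on_tensor_sketch and the structure handling are assumptions for illustration, not baal's actual implementation.

import torch

def map_on_tensor_sketch(fn, structure):
    # Apply `fn` to every tensor found in a tensor / list / tuple / dict structure.
    if isinstance(structure, torch.Tensor):
        return fn(structure)
    if isinstance(structure, (list, tuple)):
        return type(structure)(map_on_tensor_sketch(fn, item) for item in structure)
    if isinstance(structure, dict):
        return {key: map_on_tensor_sketch(fn, value) for key, value in structure.items()}
    return structure

t = [torch.zeros(2, 3), torch.ones(2, 3)]
doubled = map_on_tensor_sketch(lambda x: x * 2, t)
assert doubled[1].sum().item() == 12.0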
Example #3
    def predict_on_dataset_generator(
        self,
        dataset,
        iterations: int = 1,
        half: bool = False,
        ignore_keys: Optional[List[str]] = None,
    ):
        """
        Use the model to predict on a dataset `iterations` times.

        Args:
            dataset (Dataset): Dataset to predict on.
            iterations (int): Number of iterations per sample.
            half (bool): If True, cast predictions to half precision.
            ignore_keys (Optional[List[str]]): A list of keys in the output of your model
                (if it is a dictionary) that should be ignored when gathering predictions.
        Notes:
            The "batch" is made of `batch_size` * `iterations` samples.

        Returns:
            Generator of numpy arrays of shape [batch_size, n_classes, ..., n_iterations].
        """

        dataloader = self.get_eval_dataloader(dataset)
        log.info("Start Predict", dataset=len(dataset))

        model = self.model

        model.eval()
        for step, inputs in enumerate(tqdm(dataloader)):
            inputs = map_on_tensor(
                lambda element: map_on_tensor(
                    lambda d: stack_in_memory(d, iterations), element),
                inputs,
            )
            _, out, _ = self.prediction_step(model,
                                             inputs,
                                             prediction_loss_only=False,
                                             ignore_keys=ignore_keys)

            out = map_on_tensor(
                lambda o: o.view([iterations, -1, *o.size()[1:]]), out)
            out = map_on_tensor(
                lambda o: o.permute(1, 2, *range(3, o.ndimension()), 0), out)
            out = map_on_tensor(lambda x: x.detach(), out)
            if half:
                out = map_on_tensor(lambda x: x.half(), out)
            yield map_on_tensor(lambda x: x.cpu().numpy(), out)
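Both prediction methods rely on the same view/permute pair to fold the replicated batch back so that the iteration axis ends up last. A self-contained illustration in plain torch (shapes chosen arbitrarily):

import torch

iterations, batch_size, n_classes = 5, 4, 10
flat_out = torch.randn(iterations * batch_size, n_classes)   # what the model returns

out = flat_out.view([iterations, -1, *flat_out.size()[1:]])  # [iterations, batch, classes]
out = out.permute(1, 2, *range(3, out.ndimension()), 0)      # [batch, classes, iterations]
assert out.shape == (batch_size, n_classes, iterations)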
Example #4
def test_stack_in_memory_single(a_tensor):
    iterations = 10
    out = array_utils.stack_in_memory(a_tensor, iterations=iterations)
    assert out.shape == (10 * iterations, 3, 32, 32)
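For reference, a minimal sketch of what stack_in_memory is assumed to do, consistent with the tests above and with the view([iterations, -1, ...]) calls in the prediction methods: replicate the batch `iterations` times along the leading dimension. The actual baal.utils.array_utils implementation may differ in details (e.g., its out-of-memory error handling).

import torch

def stack_in_memory_sketch(data: torch.Tensor, iterations: int) -> torch.Tensor:
    # [batch, ...] -> [iterations, batch, ...] -> [iterations * batch, ...]
    batch_size = data.size(0)
    stacked = torch.stack([data] * iterations)
    return stacked.view(batch_size * iterations, *data.size()[1:])

a_tensor = torch.randn(10, 3, 32, 32)
out = stack_in_memory_sketch(a_tensor, iterations=10)
assert out.shape == (100, 3, 32, 32)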