# Example 1
    def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
        """
        Invert the transforms recorded for each key and store the result back.

        For every ``key`` (paired with an ``orig_key`` whose transform trace is
        reused), look up the recorded transform info under
        ``f"{orig_key}{InverseKeys.KEY_SUFFIX}"``, run ``self.transform.inverse``
        on it, and write the inverted data to ``d[key]``. If the trace entry is
        missing, a warning is emitted and the key is skipped.

        Args:
            data: dictionary of data to invert; expected to contain the transform
                trace entries produced by the forward invertible transforms.

        Returns:
            a shallow copy of ``data`` with the inverted data (and, when present,
            the inverted meta dict) written under the output keys.
        """
        d = dict(data)
        for (
                key,
                orig_key,
                meta_key,
                orig_meta_key,
                meta_key_postfix,
                nearest_interp,
                to_tensor,
                device,
                post_func,
        ) in self.key_iterator(
                d,
                self.orig_keys,
                self.meta_keys,
                self.orig_meta_keys,
                self.meta_key_postfix,
                self.nearest_interp,
                self.to_tensor,
                self.device,
                self.post_func,
        ):
            transform_key = f"{orig_key}{InverseKeys.KEY_SUFFIX}"
            if transform_key not in d:
                warnings.warn(
                    f"transform info of `{orig_key}` is not available or no InvertibleTransform applied."
                )
                continue

            transform_info = d[transform_key]
            if nearest_interp:
                # force nearest interpolation for the inverse pass; deepcopy so the
                # recorded trace in `d` is left untouched
                transform_info = convert_inverse_interp_mode(
                    trans_info=deepcopy(transform_info),
                    mode="nearest",
                    align_corners=None)

            # renamed from `input` to avoid shadowing the builtin
            inputs = d[key]
            if isinstance(inputs, torch.Tensor):
                # detach so the inversion does not track gradients
                inputs = inputs.detach()
            # construct the input dict data for BatchInverseTransform
            input_dict = {orig_key: inputs, transform_key: transform_info}
            orig_meta_key = orig_meta_key or f"{orig_key}_{meta_key_postfix}"
            meta_key = meta_key or f"{key}_{meta_key_postfix}"
            if orig_meta_key in d:
                input_dict[orig_meta_key] = d[orig_meta_key]

            # only `orig_key` is present, so allow the other transform keys to be missing
            with allow_missing_keys_mode(self.transform):  # type: ignore
                inverted = self.transform.inverse(input_dict)

            # save the inverted data, optionally converting to tensor / moving device
            d[key] = post_func(
                self._totensor(inverted[orig_key]).
                to(device) if to_tensor else inverted[orig_key])
            # save the inverted meta dict
            if orig_meta_key in d:
                d[meta_key] = inverted.get(orig_meta_key)

        return d
# Example 2
    def __call__(
        self,
        data: Dict[str, Any],
        num_examples: int = 10
    ) -> Union[Tuple[np.ndarray, np.ndarray, np.ndarray, float], np.ndarray]:
        """
        Args:
            data: dictionary data to be processed.
            num_examples: number of realisations to be processed and results combined.

        Returns:
            - if `return_full_data==False`: mode, mean, std, vvc. The mode, mean and standard deviation are calculated
                across `num_examples` outputs at each voxel. The volume variation coefficient (VVC) is `std/mean`
                across the whole output, including `num_examples`. See original paper for clarification.
            - if `return_full_data==True`: data is returned as-is after applying the `inferrer_fn` and then
                concatenating across the first dimension containing `num_examples`. This allows the user to perform
                their own analysis if desired.

        Raises:
            ValueError: when ``num_examples`` is not a multiple of ``self.batch_size``.
        """
        d = dict(data)

        # check num examples is multiple of batch size
        if num_examples % self.batch_size != 0:
            raise ValueError("num_examples should be multiple of batch size.")

        # replicate the input so each copy gets its own random transform realisation
        data_in = [d] * num_examples
        ds = Dataset(data_in, self.transform)
        # pass num_workers by keyword: relying on its positional slot is fragile
        # (in plain torch DataLoader the second positional argument is batch_size)
        dl = DataLoader(ds,
                        num_workers=self.num_workers,
                        batch_size=self.batch_size,
                        collate_fn=pad_list_data_collate)

        label_transform_key = self.label_key + InverseKeys.KEY_SUFFIX

        # create inverter to undo the spatial transforms on each inferred batch
        inverter = BatchInverseTransform(self.transform,
                                         dl,
                                         collate_fn=list_data_collate)

        outputs: List[np.ndarray] = []

        for batch_data in tqdm(dl) if self.progress else dl:
            batch_images = batch_data[self.image_key].to(self.device)

            # do model forward pass
            batch_output = self.inferrer_fn(batch_images)
            if isinstance(batch_output, torch.Tensor):
                batch_output = batch_output.detach().cpu()
            if isinstance(batch_output, np.ndarray):
                batch_output = torch.Tensor(batch_output)

            # create a dictionary containing the inferred batch and their transforms
            inferred_dict = {
                self.label_key: batch_output,
                label_transform_key: batch_data[label_transform_key]
            }
            # if meta dict is present, add that too (required for some inverse transforms)
            label_meta_dict_key = self.meta_keys or f"{self.label_key}_{self.meta_key_postfix}"
            if label_meta_dict_key in batch_data:
                inferred_dict[label_meta_dict_key] = batch_data[
                    label_meta_dict_key]

            # do inverse transformation (allow missing keys as only inverting label)
            with allow_missing_keys_mode(self.transform):  # type: ignore
                inv_batch = inverter(inferred_dict)

            # append
            outputs.append(inv_batch[self.label_key])

        # stack all realisations along the first dimension
        output: np.ndarray = np.concatenate(outputs)

        if self.return_full_data:
            return output

        # calculate metrics across the realisation dimension
        mode = np.array(
            torch.mode(torch.Tensor(output.astype(np.int64)), dim=0).values)
        mean: np.ndarray = np.mean(output, axis=0)  # type: ignore
        std: np.ndarray = np.std(output, axis=0)  # type: ignore
        vvc: float = (np.std(output) / np.mean(output)).item()
        return mode, mean, std, vvc
# Example 3
    def __call__(self, data: Mapping[Hashable, Any]) -> Dict[Hashable, Any]:
        """
        Invert the transforms recorded for each key and store the result back.

        For every ``key`` (paired with an ``orig_key`` whose transform trace is
        reused), the recorded applied operations and meta information are looked
        up either directly on a ``MetaTensor`` or, for legacy data, in the
        separate trace/meta dict entries. The traced pipeline is then inverted
        via ``self.transform.inverse`` and the result written to ``d[key]``.
        Keys without trace information are skipped with a warning.

        Args:
            data: dictionary of data to invert; expected to carry transform
                trace information (on MetaTensors or in trace-key entries).

        Returns:
            a shallow copy of ``data`` with the inverted data written under the
            output keys (and trace/meta entries updated when present).
        """
        d = dict(data)
        for (
                key,
                orig_key,
                meta_key,
                orig_meta_key,
                meta_key_postfix,
                nearest_interp,
                to_tensor,
                device,
                post_func,
        ) in self.key_iterator(
                d,
                self.orig_keys,
                self.meta_keys,
                self.orig_meta_keys,
                self.meta_key_postfix,
                self.nearest_interp,
                self.to_tensor,
                self.device,
                self.post_func,
        ):
            # MetaTensor data carries its own trace; legacy data stores it under
            # a separate trace key — verify the relevant source exists.
            if isinstance(d[key], MetaTensor):
                if orig_key not in d:
                    warnings.warn(
                        f"transform info of `{orig_key}` is not available in MetaTensor {key}."
                    )
                    continue
            else:
                transform_key = InvertibleTransform.trace_key(orig_key)
                if transform_key not in d:
                    warnings.warn(
                        f"transform info of `{orig_key}` is not available or no InvertibleTransform applied."
                    )
                    continue

            orig_meta_key = orig_meta_key or f"{orig_key}_{meta_key_postfix}"
            # fetch the trace + meta either from the MetaTensor or the dict entries
            if orig_key in d and isinstance(d[orig_key], MetaTensor):
                transform_info = d[orig_key].applied_operations
                meta_info = d[orig_key].meta
            else:
                transform_info = d[InvertibleTransform.trace_key(orig_key)]
                meta_info = d.get(orig_meta_key, {})
            if nearest_interp:
                # force nearest interpolation for the inverse pass
                transform_info = convert_applied_interp_mode(
                    trans_info=transform_info,
                    mode="nearest",
                    align_corners=None)

            inputs = d[key]
            if isinstance(inputs, torch.Tensor):
                # detach so the inversion does not track gradients
                inputs = inputs.detach()

            # ensure a MetaTensor so the trace and meta ride along with the data;
            # deepcopy so the inverse pass cannot mutate the recorded originals
            if not isinstance(inputs, MetaTensor):
                inputs = convert_to_tensor(inputs, track_meta=True)
            inputs.applied_operations = deepcopy(transform_info)
            inputs.meta = deepcopy(meta_info)

            # construct the input dict data
            input_dict = {orig_key: inputs}
            # optionally also populate the legacy trace/meta dict entries
            if config.USE_META_DICT:
                input_dict[InvertibleTransform.trace_key(
                    orig_key)] = transform_info
                input_dict[PostFix.meta(orig_key)] = meta_info
            # only `orig_key` is present, so allow other transform keys to be missing
            with allow_missing_keys_mode(self.transform):  # type: ignore
                inverted = self.transform.inverse(input_dict)

            # save the inverted data; skip conversion if already a MetaTensor
            if to_tensor and not isinstance(inverted[orig_key], MetaTensor):
                inverted_data = self._totensor(inverted[orig_key])
            else:
                inverted_data = inverted[orig_key]
            d[key] = post_func(inverted_data.to(device))
            # save the inverted applied_operations if it's in the source dict
            if InvertibleTransform.trace_key(orig_key) in d:
                d[InvertibleTransform.trace_key(
                    orig_key)] = inverted_data.applied_operations
            # save the inverted meta dict if it's in the source dict
            if orig_meta_key in d:
                meta_key = meta_key or f"{key}_{meta_key_postfix}"
                d[meta_key] = inverted.get(orig_meta_key)
        return d