Example #1
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Apply the transform to `img`.
        """
        if isinstance(img, torch.Tensor):
            return img.permute(self.indices or tuple(range(img.ndim)[::-1]))
        return img.transpose(self.indices)  # type: ignore
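A minimal, self-contained sketch of the same numpy/torch dispatch (the free function transpose_any is illustrative, not MONAI's API):

import numpy as np
import torch

def transpose_any(img, indices=None):
    # permute for tensors, transpose for arrays; indices=None reverses all axes
    if isinstance(img, torch.Tensor):
        return img.permute(indices or tuple(range(img.ndim))[::-1])
    return img.transpose(indices)

assert transpose_any(np.zeros((2, 3, 4))).shape == (4, 3, 2)
assert tuple(transpose_any(torch.zeros(2, 3, 4)).shape) == (4, 3, 2)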
Example #2
File: array.py  Project: Nic-Ma/MONAI
    def __call__(self, prob_map: NdarrayOrTensor):
        """
        prob_map: the input probability map; it must have shape (H[, W, ...]).
        """
        if self.sigma != 0:
            if not isinstance(prob_map, torch.Tensor):
                prob_map = torch.as_tensor(prob_map, dtype=torch.float)
            self.filter.to(prob_map.device)
            prob_map = self.filter(prob_map)

        prob_map_shape = prob_map.shape

        outputs = []
        while prob_map.max() > self.prob_threshold:
            max_idx = unravel_index(prob_map.argmax(), prob_map_shape)
            prob_max = prob_map[tuple(max_idx)]
            max_idx = max_idx.cpu().numpy() if isinstance(
                max_idx, torch.Tensor) else max_idx
            prob_max = prob_max.item() if isinstance(
                prob_max, torch.Tensor) else prob_max
            outputs.append([prob_max] + list(max_idx))

            idx_min_range = (max_idx - self.box_lower_bd).clip(0, None)
            idx_max_range = (max_idx + self.box_upper_bd).clip(
                None, prob_map_shape)
            # for each dimension, zero out the values within the suppression index range
            slices = tuple(
                slice(idx_min_range[i], idx_max_range[i])
                for i in range(self.spatial_dims))
            prob_map[slices] = 0

        return outputs
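A toy, numpy-only illustration of the same greedy peak suppression (all names and sizes here are made up for the sketch):

import numpy as np

prob = np.zeros((8, 8))
prob[2, 2], prob[2, 3], prob[6, 6] = 0.9, 0.8, 0.7
outputs, box = [], 1  # suppress a (2*box+1)-wide window around each accepted peak
while prob.max() > 0.5:
    idx = np.unravel_index(prob.argmax(), prob.shape)
    outputs.append([float(prob[idx]), int(idx[0]), int(idx[1])])
    lo = np.clip(np.array(idx) - box, 0, None)
    hi = np.clip(np.array(idx) + box + 1, None, prob.shape)
    prob[lo[0]:hi[0], lo[1]:hi[1]] = 0
print(outputs)  # [[0.9, 2, 2], [0.7, 6, 6]]: the 0.8 peak is suppressed by its 0.9 neighbour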
Example #3
File: array.py  Project: Nic-Ma/MONAI
    def __call__(self,
                 img: NdarrayOrTensor,
                 randomize: bool = True) -> NdarrayOrTensor:
        """
        Apply the transform to `img`; if `randomize` is True, randomize the smooth field, otherwise reuse the previous one.
        """
        img = convert_to_tensor(img, track_meta=get_track_meta())
        if randomize:
            self.randomize()

        if not self._do_transform:
            return img

        img_min = img.min()
        img_max = img.max()
        img_rng = img_max - img_min

        field = self.sfield()
        rfield, *_ = convert_to_dst_type(field, img)

        # everything below here is to be computed using the destination type (numpy, tensor, etc.)

        img = (img - img_min) / (img_rng + 1e-10)  # rescale to unit values
        img = img**rfield  # contrast is changed by raising image data to a power, in this case the field

        out = (img * img_rng
               ) + img_min  # rescale back to the original image value range

        return out
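The contrast change itself is plain gamma-style arithmetic on unit-scaled intensities; a scalar stand-in for the random smooth field makes the computation concrete:

import numpy as np

img = np.array([0.0, 64.0, 128.0, 255.0])
lo, rng = img.min(), img.max() - img.min()
gamma = 2.0  # a spatially constant stand-in for the random smooth field
out = ((img - lo) / (rng + 1e-10)) ** gamma * rng + lo
print(out)  # [0., ~16.06, ~64.25, 255.]: mid-tones darken, extremes stay fixed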
Example #4
def searchsorted(a: NdarrayOrTensor, v: NdarrayOrTensor, right=False, sorter=None):
    side = "right" if right else "left"
    if isinstance(a, np.ndarray):
        return np.searchsorted(a, v, side, sorter)  # type: ignore
    if hasattr(torch, "searchsorted"):
        return torch.searchsorted(a, v, right=right)  # type: ignore
    # if using old PyTorch, will convert to numpy array then compute
    ret = np.searchsorted(a.cpu().numpy(), v.cpu().numpy(), side, sorter)  # type: ignore
    ret, *_ = convert_to_dst_type(ret, a)
    return ret
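A quick parity check for the helper above (assuming the numpy/torch imports used elsewhere in this file):

import numpy as np
import torch

a, v = np.array([1, 3, 5, 7]), np.array([4, 5])
print(searchsorted(a, v))              # [2 2]
print(searchsorted(a, v, right=True))  # [2 3]
print(searchsorted(torch.tensor([1, 3, 5, 7]), torch.tensor([4, 5]), right=True))  # tensor([2, 3])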
Example #5
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Args:
            img: numpy array or torch tensor; the singleton dimension `dim` will be removed
        """
        if self.dim is None:
            return img.squeeze()
        # torch silently ignores squeeze on non-singleton dims while numpy raises,
        # so check explicitly for pytorch/numpy unification
        if img.shape[self.dim] != 1:
            raise ValueError("Can only squeeze singleton dimension")
        return img.squeeze(self.dim)
Example #6
def ravel(x: NdarrayOrTensor) -> NdarrayOrTensor:
    """`np.ravel` with equivalent implementation for torch.

    Args:
        x: array/tensor to ravel

    Returns:
        Return a contiguous flattened array/tensor.
    """
    if isinstance(x, torch.Tensor):
        if hasattr(torch, "ravel"):  # `ravel` is new in torch 1.8.0
            return x.ravel()
        return x.flatten().contiguous()
    return np.ravel(x)
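Usage is symmetric across the two backends:

import numpy as np
import torch

x = torch.arange(6).reshape(2, 3)
print(ravel(x))          # tensor([0, 1, 2, 3, 4, 5])
print(ravel(x.numpy()))  # [0 1 2 3 4 5]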
Example #7
File: utils.py  Project: ahatamiz/MONAI
def assert_allclose(
    actual: NdarrayOrTensor,
    desired: NdarrayOrTensor,
    type_test: Union[bool, str] = True,
    device_test: bool = False,
    *args,
    **kwargs,
):
    """
    Assert that types and all values of two data objects are close.

    Args:
        actual: Pytorch Tensor or numpy array for comparison.
        desired: Pytorch Tensor or numpy array to compare against.
        type_test: whether to test that `actual` and `desired` are both numpy arrays or torch tensors.
            If ``type_test == "tensor"``, it instead checks whether `actual` is a torch.Tensor or
            MetaTensor, according to `get_track_meta`.
        device_test: whether to test the device property.
        args: extra arguments to pass on to `np.testing.assert_allclose`.
        kwargs: extra arguments to pass on to `np.testing.assert_allclose`.


    """
    if isinstance(type_test, str) and type_test == "tensor":
        if get_track_meta():
            np.testing.assert_equal(isinstance(actual, MetaTensor), True,
                                    "must be a MetaTensor")
        else:
            np.testing.assert_equal(
                isinstance(actual, torch.Tensor)
                and not isinstance(actual, MetaTensor), True,
                "must be a torch.Tensor")
    elif type_test:
        # check both actual and desired are of the same type
        np.testing.assert_equal(isinstance(actual, np.ndarray),
                                isinstance(desired, np.ndarray), "numpy type")
        np.testing.assert_equal(isinstance(actual, torch.Tensor),
                                isinstance(desired, torch.Tensor),
                                "torch type")

    if isinstance(desired, torch.Tensor) or isinstance(actual, torch.Tensor):
        if device_test:
            np.testing.assert_equal(str(actual.device), str(desired.device),
                                    "torch device check")  # type: ignore
        actual = actual.detach().cpu().numpy() if isinstance(
            actual, torch.Tensor) else actual
        desired = desired.detach().cpu().numpy() if isinstance(
            desired, torch.Tensor) else desired
    np.testing.assert_allclose(actual, desired, *args, **kwargs)
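A few hedged usage examples (tolerances pass straight through to np.testing.assert_allclose):

import numpy as np
import torch

assert_allclose(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-8]), rtol=1e-5)  # passes
assert_allclose(torch.tensor([1.0]), np.array([1.0]), type_test=False)         # passes; mixed types allowed
# assert_allclose(torch.tensor([1.0]), np.array([1.0]))                        # would fail the type check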
Example #8
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Filter the image on the `applied_labels`.

        Args:
            img: Pytorch tensor or numpy array of any shape.

        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.

        Returns:
            Pytorch tensor or numpy array of the same shape as the input.
        """
        if not isinstance(img, (np.ndarray, torch.Tensor)):
            raise NotImplementedError(
                f"{self.__class__} can not handle data of type {type(img)}.")

        if isinstance(img, torch.Tensor):
            if hasattr(torch, "isin"):  # `isin` is new in torch 1.10.0
                appl_lbls = torch.as_tensor(self.applied_labels,
                                            device=img.device)
                return torch.where(torch.isin(img, appl_lbls), img,
                                   torch.tensor(0.0).to(img))
            else:
                out = self(img.detach().cpu().numpy())
                out, *_ = convert_to_dst_type(out, img)
                return out
        return np.asarray(np.where(np.isin(img, self.applied_labels), img, 0))
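The torch >= 1.10 branch is just a vectorised membership test; in isolation:

import torch

img = torch.tensor([[0, 1], [2, 3]])
keep = torch.as_tensor([1, 3])
print(torch.where(torch.isin(img, keep), img, torch.tensor(0.0).to(img)))
# tensor([[0, 1], [0, 3]])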
Example #9
def any_np_pt(x: NdarrayOrTensor,
              axis: Union[int, Sequence[int]]) -> NdarrayOrTensor:
    """`np.any` with equivalent implementation for torch.

    For pytorch, convert to boolean for compatibility with older versions.

    Args:
        x: input array/tensor
        axis: axis to perform `any` over

    Returns:
        The result of `any` over the given axis (or axes).
    """
    if isinstance(x, np.ndarray):
        return np.any(x, axis)  # type: ignore

    # pytorch can't handle multiple dimensions to `any` so loop across them
    axis = [axis] if not isinstance(axis, Sequence) else axis
    for ax in axis:
        try:
            x = torch.any(x, ax)
        except RuntimeError:
            # older versions of pytorch require the input to be cast to boolean
            x = torch.any(x.bool(), ax)
    return x
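Single-axis calls match numpy directly (note that for multiple axes the loop reduces sequentially, so each axis index refers to the progressively reduced tensor):

import numpy as np
import torch

x = torch.tensor([[0, 1], [0, 0]])
print(any_np_pt(x, 1))          # tensor([ True, False])
print(any_np_pt(x.numpy(), 1))  # [ True False]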
Example #10
def swapaxes_boxes(boxes: NdarrayOrTensor, axis1: int, axis2: int):
    """
    Interchange two axes of boxes.

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        axis1: First axis.
        axis2: Second axis.

    Returns:
        boxes with two axes interchanged.

    """
    spatial_dims: int = get_spatial_dims(boxes=boxes)

    if isinstance(boxes, torch.Tensor):
        boxes_swap = boxes.clone()
    else:
        boxes_swap = deepcopy(boxes)  # type: ignore
    boxes_swap[:, [axis1, axis2]] = boxes_swap[:, [axis2,
                                                   axis1]]  # type: ignore
    boxes_swap[:, [spatial_dims + axis1, spatial_dims +
                   axis2]] = boxes_swap[  # type: ignore
                       :, [spatial_dims + axis2, spatial_dims + axis1]]
    return boxes_swap
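A worked 2-D example (assuming get_spatial_dims resolves to 2 for Nx4 boxes, as in MONAI):

import torch

boxes = torch.tensor([[1, 2, 3, 4]])  # (xmin, ymin, xmax, ymax) in StandardMode
print(swapaxes_boxes(boxes, 0, 1))    # tensor([[2, 1, 4, 3]])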
Example #11
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        # if img has channel dim, squeeze it
        if img.ndim == 4 and img.shape[0] == 1:
            img = img.squeeze(0)

        # TC (tumor core) = labels 1 (non-enhancing tumor) and 4 (enhancing tumor);
        # WT (whole tumor) adds label 2 (edema); ET (enhancing tumor) = label 4
        result = [(img == 1) | (img == 4),
                  (img == 1) | (img == 4) | (img == 2), img == 4]
        return torch.stack(result, dim=0) if isinstance(
            img, torch.Tensor) else np.stack(result, axis=0)
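Unrolled for a single torch input, the label mapping reads:

import torch

img = torch.tensor([[0, 1], [2, 4]])
tc = (img == 1) | (img == 4)  # tumor core
wt = tc | (img == 2)          # whole tumor
et = img == 4                 # enhancing tumor
print(torch.stack([tc, wt, et], dim=0).shape)  # torch.Size([3, 2, 2])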
Example #12
def assert_allclose(
    actual: NdarrayOrTensor,
    desired: NdarrayOrTensor,
    type_test: bool = True,
    device_test: bool = False,
    *args,
    **kwargs,
):
    """
    Assert that types and all values of two data objects are close.

    Args:
        actual: Pytorch Tensor or numpy array for comparison.
        desired: Pytorch Tensor or numpy array to compare against.
        type_test: whether to test that `actual` and `desired` are both numpy arrays or torch tensors.
        device_test: whether to test the device property.
        args: extra arguments to pass on to `np.testing.assert_allclose`.
        kwargs: extra arguments to pass on to `np.testing.assert_allclose`.


    """
    if type_test:
        # check both actual and desired are of the same type
        np.testing.assert_equal(isinstance(actual, np.ndarray),
                                isinstance(desired, np.ndarray), "numpy type")
        np.testing.assert_equal(isinstance(actual, torch.Tensor),
                                isinstance(desired, torch.Tensor),
                                "torch type")

    if isinstance(desired, torch.Tensor) or isinstance(actual, torch.Tensor):
        if device_test:
            np.testing.assert_equal(str(actual.device), str(desired.device),
                                    "torch device check")  # type: ignore
        actual = actual.detach().cpu().numpy() if isinstance(
            actual, torch.Tensor) else actual
        desired = desired.detach().cpu().numpy() if isinstance(
            desired, torch.Tensor) else desired
    np.testing.assert_allclose(actual, desired, *args, **kwargs)
Example #13
def percentile(x: NdarrayOrTensor, q) -> Union[NdarrayOrTensor, float, int]:
    """`np.percentile` with equivalent implementation for torch.

    Pytorch uses `quantile`, but this functionality is only available from v1.7.
    For earlier versions, we calculate it ourselves. This doesn't do interpolation,
    so it is the equivalent of ``numpy.percentile(..., interpolation="nearest")``.

    Args:
        x: input data
        q: percentile to compute (should be in the range 0 <= q <= 100)

    Returns:
        Resulting value (scalar)
    """
    if np.isscalar(q):
        if not 0 <= q <= 100:
            raise ValueError
    elif any(q < 0) or any(q > 100):
        raise ValueError
    result: Union[NdarrayOrTensor, float, int]
    if isinstance(x, np.ndarray):
        result = np.percentile(x, q)
    else:
        q = torch.tensor(q, device=x.device)
        if hasattr(torch, "quantile"):
            result = torch.quantile(x, q / 100.0)
        else:
            # Note that ``kthvalue()`` works one-based, i.e., the first sorted value
            # corresponds to k=1, not k=0. Thus, we need the `1 +`.
            k = 1 + (0.01 * q * (x.numel() - 1)).round().int()
            if k.numel() > 1:
                r = [x.view(-1).kthvalue(int(_k)).values.item() for _k in k]
                result = torch.tensor(r, device=x.device)
            else:
                result = x.view(-1).kthvalue(int(k)).values.item()

    return result
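Both branches agree on exact percentiles (assuming torch >= 1.7, so torch.quantile is used):

import torch

x = torch.arange(11, dtype=torch.float)
print(percentile(x, 50))          # tensor(5.)
print(percentile(x.numpy(), 50))  # 5.0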
Example #14
def flip_boxes(
    boxes: NdarrayOrTensor,
    spatial_size: Union[Sequence[int], int],
    flip_axes: Optional[Union[Sequence[int], int]] = None,
):
    """
    Flip boxes when the corresponding image is flipped

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        spatial_size: image spatial size.
        flip_axes: spatial axes along which to flip over. Default is None.
            The default `flip_axes=None` will flip over all of the spatial axes.
            If an axis is negative it counts from the last to the first axis.
            If `flip_axes` is a tuple of ints, flipping is performed on all of the axes
            specified in the tuple.

    Returns:
        flipped boxes, with same data type as ``boxes``, does not share memory with ``boxes``
    """
    spatial_dims: int = get_spatial_dims(boxes=boxes)
    spatial_size = ensure_tuple_rep(spatial_size, spatial_dims)
    if flip_axes is None:
        flip_axes = tuple(range(0, spatial_dims))
    flip_axes = ensure_tuple(flip_axes)

    # flip box
    if isinstance(boxes, torch.Tensor):
        _flip_boxes = boxes.clone()
    else:
        _flip_boxes = deepcopy(boxes)  # type: ignore

    for axis in flip_axes:
        _flip_boxes[:, axis + spatial_dims] = spatial_size[
            axis] - boxes[:, axis] - TO_REMOVE
        _flip_boxes[:, axis] = spatial_size[
            axis] - boxes[:, axis + spatial_dims] - TO_REMOVE

    return _flip_boxes
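A worked example on a 10x10 image (TO_REMOVE is 0.0 in MONAI's box utilities):

import torch

boxes = torch.tensor([[1., 2., 4., 6.]])  # (xmin, ymin, xmax, ymax)
print(flip_boxes(boxes, spatial_size=(10, 10), flip_axes=0))
# tensor([[6., 2., 9., 6.]]): xmin/xmax become 10 - xmax and 10 - xmin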
Example #15
    def __call__(self, image: NdarrayOrTensor) -> NdarrayOrTensor:
        if self.grid_size == (1, 1) and self.patch_size is None:
            if isinstance(image, torch.Tensor):
                return torch.stack([image])
            elif isinstance(image, np.ndarray):
                return np.stack([image])  # type: ignore
            else:
                raise ValueError(
                    f"Input type [{type(image)}] is not supported.")

        patch_size, steps = self.get_params(image.shape[1:])
        patches: NdarrayOrTensor
        if isinstance(image, torch.Tensor):
            patches = (image.unfold(1, patch_size[0], steps[0]).unfold(
                2, patch_size[1],
                steps[1]).flatten(1, 2).transpose(0, 1).contiguous())
        elif isinstance(image, np.ndarray):
            x_step, y_step = steps
            c_stride, x_stride, y_stride = image.strides
            n_channels = image.shape[0]
            patches = as_strided(
                image,
                shape=(*self.grid_size, n_channels, patch_size[0],
                       patch_size[1]),
                strides=(x_stride * x_step, y_stride * y_step, c_stride,
                         x_stride, y_stride),
                writeable=False,
            )
            # flatten the first two dimensions
            patches = patches.reshape(np.prod(patches.shape[:2]),
                                      *patches.shape[2:])
            # make it a contiguous array
            patches = np.ascontiguousarray(patches)
        else:
            raise ValueError(f"Input type [{type(image)}] is not supported.")

        return patches
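The tensor branch's unfold chain can be traced on a tiny image (sizes here are hypothetical):

import torch

image = torch.arange(16, dtype=torch.float).reshape(1, 4, 4)  # channel-first
patches = (image.unfold(1, 2, 2).unfold(2, 2, 2)
           .flatten(1, 2).transpose(0, 1).contiguous())
print(patches.shape)  # torch.Size([4, 1, 2, 2]): four 2x2 patches, channel-first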
Example #16
def write_nifti(
    data: NdarrayOrTensor,
    file_name: str,
    affine: Optional[NdarrayOrTensor] = None,
    target_affine: Optional[np.ndarray] = None,
    resample: bool = True,
    output_spatial_shape: Union[Sequence[int], np.ndarray, None] = None,
    mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
    padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
    align_corners: bool = False,
    dtype: DtypeLike = np.float64,
    output_dtype: DtypeLike = np.float32,
) -> None:
    """
    Write numpy data into NIfTI files to disk.  This function converts data
    into the coordinate system defined by `target_affine` when `target_affine`
    is specified.

    If the coordinate transform between `affine` and `target_affine` could be
    achieved by simply transposing and flipping `data`, no resampling will
    happen. Otherwise, this function will resample `data` using the coordinate
    transform computed from `affine` and `target_affine`. Note that the shape
    of the resampled `data` may be subject to some rounding errors. For example,
    resampling a 20x20-pixel image from pixel size (1.5, 1.5)-mm to (3.0,
    3.0)-mm space will return a 10x10-pixel image. However, resampling a
    20x20-pixel image from pixel size (2.0, 2.0)-mm to (3.0, 3.0)-mm space
    will output a 14x14-pixel image, where the image shape is rounded from
    13.333x13.333 pixels. In this case `output_spatial_shape` could be specified so
    that this function writes image data to a designated shape.

    The saved `affine` matrix follows:

    - If `affine` equals `target_affine`, save the data with `target_affine`.
    - If `resample=False`, transform `affine` to `new_affine` based on the orientation
      of `target_affine` and save the data with `new_affine`.
    - If `resample=True`, save the data with `target_affine`; if `output_spatial_shape`
      is explicitly specified, the shape of the saved data is not computed from `target_affine`.
    - If `target_affine` is None, set `target_affine=affine` and save.
    - If `affine` and `target_affine` are both None, the data will be saved with an identity
      matrix as the image affine.

    This function assumes the NIfTI dimension notations.
    Spatially it supports up to three dimensions, that is, H, HW, HWD for
    1D, 2D, 3D respectively.
    When saving `data` with multiple time steps or multiple channels, the time and/or
    modality axes should be appended after the first three dimensions. For
    example, the shape of 2D eight-class segmentation probabilities to be saved
    could be `(64, 64, 1, 8)`. Also, data of shape (64, 64, 8) or (64, 64, 8, 1)
    will be considered a single-channel 3D image.

    Args:
        data: input data to write to file.
        file_name: expected file name to be saved on disk.
        affine: the current affine of `data`. Defaults to `np.eye(4)`
        target_affine: before saving
            the (`data`, `affine`) as a Nifti1Image,
            transform the data into the coordinates defined by `target_affine`.
        resample: whether to run resampling when the target affine
            could not be achieved by swapping/flipping data axes.
        output_spatial_shape: spatial shape of the output image.
            This option is used when ``resample = True``.
        mode: {``"bilinear"``, ``"nearest"``}
            This option is used when ``resample = True``.
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            This option is used when ``resample = True``.
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
            If None, use the data type of input data.
        output_dtype: data type for saving data. Defaults to ``np.float32``.
    """
    if isinstance(data, torch.Tensor):
        data, *_ = convert_data_type(data, np.ndarray)
    if isinstance(affine, torch.Tensor):
        affine, *_ = convert_data_type(affine, np.ndarray)
    if not isinstance(data, np.ndarray):
        raise AssertionError("input data must be numpy array or torch tensor.")
    dtype = dtype or data.dtype
    sr = min(data.ndim, 3)
    if affine is None:
        affine = np.eye(4, dtype=np.float64)
    affine = to_affine_nd(sr, affine)  # type: ignore

    if target_affine is None:
        target_affine = affine
    target_affine = to_affine_nd(sr, target_affine)

    if np.allclose(affine, target_affine, atol=1e-3):
        # no affine changes, save (data, affine)
        results_img = nib.Nifti1Image(data.astype(output_dtype),
                                      to_affine_nd(3, target_affine))
        nib.save(results_img, file_name)
        return

    # resolve orientation
    start_ornt = nib.orientations.io_orientation(affine)
    target_ornt = nib.orientations.io_orientation(target_affine)
    ornt_transform = nib.orientations.ornt_transform(start_ornt, target_ornt)
    data_shape = data.shape
    data = nib.orientations.apply_orientation(data, ornt_transform)
    _affine = affine @ nib.orientations.inv_ornt_aff(ornt_transform,
                                                     data_shape)
    if np.allclose(_affine, target_affine, atol=1e-3) or not resample:
        results_img = nib.Nifti1Image(data.astype(output_dtype),
                                      to_affine_nd(3, _affine))  # type: ignore
        nib.save(results_img, file_name)
        return

    # need resampling
    affine_xform = AffineTransform(normalized=False,
                                   mode=mode,
                                   padding_mode=padding_mode,
                                   align_corners=align_corners,
                                   reverse_indexing=True)
    transform = np.linalg.inv(_affine) @ target_affine
    if output_spatial_shape is None:
        output_spatial_shape, _ = compute_shape_offset(data.shape, _affine,
                                                       target_affine)
    output_spatial_shape_ = list(
        output_spatial_shape) if output_spatial_shape is not None else []
    if data.ndim > 3:  # multi channel, resampling each channel
        while len(output_spatial_shape_) < 3:
            output_spatial_shape_ = output_spatial_shape_ + [1]
        spatial_shape, channel_shape = data.shape[:3], data.shape[3:]
        data_np: np.ndarray = data.reshape(list(spatial_shape) +
                                           [-1])  # type: ignore
        data_np = np.moveaxis(data_np, -1, 0)  # channel first for pytorch
        data_torch = affine_xform(
            torch.as_tensor(
                np.ascontiguousarray(data_np).astype(dtype)).unsqueeze(0),
            torch.as_tensor(np.ascontiguousarray(transform).astype(dtype)),
            spatial_size=output_spatial_shape_[:3],
        )
        data_np = data_torch.squeeze(0).detach().cpu().numpy()
        data_np = np.moveaxis(data_np, 0, -1)  # channel last for nifti
        data_np = data_np.reshape(
            list(data_np.shape[:3]) + list(channel_shape))
    else:  # single channel image, need to expand to have batch and channel
        while len(output_spatial_shape_) < len(data.shape):
            output_spatial_shape_ = output_spatial_shape_ + [1]
        data_torch = affine_xform(
            torch.as_tensor(
                np.ascontiguousarray(data).astype(dtype)[None, None]),
            torch.as_tensor(np.ascontiguousarray(transform).astype(dtype)),
            spatial_size=output_spatial_shape_[:len(data.shape)],
        )
        data_np = data_torch.squeeze(0).squeeze(0).detach().cpu().numpy()

    results_img = nib.Nifti1Image(data_np.astype(output_dtype),
                                  to_affine_nd(3, target_affine))
    nib.save(results_img, file_name)
    return
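A minimal usage sketch (the file name and affines are hypothetical; requires nibabel and the MONAI helpers imported above):

import numpy as np

data = np.random.rand(64, 64, 40).astype(np.float32)
write_nifti(
    data,
    "output.nii.gz",
    affine=np.eye(4),                             # current 1 mm isotropic spacing
    target_affine=np.diag([2.0, 2.0, 2.0, 1.0]),  # resample to 2 mm spacing
)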