Example #1
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Filter the image on the `applied_labels`.

        Args:
            img: Pytorch tensor or numpy array of any shape.

        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.

        Returns:
            Pytorch tensor or numpy array of the same shape as the input.
        """
        if not isinstance(img, (np.ndarray, torch.Tensor)):
            raise NotImplementedError(
                f"{self.__class__} can not handle data of type {type(img)}.")

        if isinstance(img, torch.Tensor):
            img = convert_to_tensor(img, track_meta=get_track_meta())
            img_ = convert_to_tensor(img, track_meta=False)
            if hasattr(torch, "isin"):  # `isin` is new in torch 1.10.0
                appl_lbls = torch.as_tensor(self.applied_labels,
                                            device=img_.device)
                out = torch.where(torch.isin(img_, appl_lbls), img_,
                                  torch.tensor(0.0).to(img_))
                return convert_to_dst_type(out, dst=img)[0]
            out: NdarrayOrTensor = self(
                img_.detach().cpu().numpy())  # type: ignore
            out = convert_to_dst_type(out, img)[0]  # type: ignore
            return out
        return np.asarray(np.where(np.isin(img, self.applied_labels), img, 0))
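A minimal usage sketch for the call above, assuming it belongs to MONAI's LabelFilter transform (suggested by the `applied_labels` attribute):

import numpy as np
from monai.transforms import LabelFilter  # assumption: the class shown above

seg = np.array([[0, 1, 2], [3, 2, 1]])
keep_12 = LabelFilter(applied_labels=[1, 2])
print(keep_12(seg))  # labels outside {1, 2} are zeroed: [[0 1 2], [0 2 1]]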
Example #2
def convert_mask_to_box(
        boxes_mask: NdarrayOrTensor,
        bg_label: int = -1,
        box_dtype=torch.float32,
        label_dtype=torch.long) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:
    """
    Convert an int16 mask image to boxes; the mask has the same spatial size as the input image.

    Args:
        boxes_mask: int16 array, sized (num_box, H, W). Each channel represents a box.
            The foreground region in channel c has intensity of labels[c].
            The background intensity is bg_label.
        bg_label: background label for the boxes_mask
        box_dtype: output dtype for boxes
        label_dtype: output dtype for labels

    Return:
        - bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``.
        - classification foreground (fg) labels, dtype should be int, sized (N,).
    """
    look_up_option(len(boxes_mask.shape), [3, 4])
    spatial_size = list(boxes_mask.shape[1:])
    spatial_dims = get_spatial_dims(spatial_size=spatial_size)

    boxes_mask_np, *_ = convert_data_type(boxes_mask, np.ndarray)

    boxes_list = []
    labels_list = []
    for b in range(boxes_mask_np.shape[0]):
        fg_indices = np.nonzero(boxes_mask_np[b, ...] - bg_label)
        if fg_indices[0].shape[0] == 0:
            continue
        boxes_b = []
        for fd_i in fg_indices:
            boxes_b.append(min(fd_i))  # top left corner
        for fd_i in fg_indices:
            boxes_b.append(max(fd_i) + 1 - TO_REMOVE)  # bottom right corner
        boxes_list.append(boxes_b)
        if spatial_dims == 2:
            labels_list.append(boxes_mask_np[b, fg_indices[0][0],
                                             fg_indices[1][0]])
        if spatial_dims == 3:
            labels_list.append(boxes_mask_np[b, fg_indices[0][0],
                                             fg_indices[1][0],
                                             fg_indices[2][0]])

    if len(boxes_list) == 0:
        boxes_np, labels_np = np.zeros([0, 2 * spatial_dims]), np.zeros([0])
    else:
        boxes_np, labels_np = np.asarray(boxes_list), np.asarray(labels_list)
    boxes, *_ = convert_to_dst_type(src=boxes_np,
                                    dst=boxes_mask,
                                    dtype=box_dtype)
    labels, *_ = convert_to_dst_type(src=labels_np,
                                     dst=boxes_mask,
                                     dtype=label_dtype)
    return boxes, labels
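A small worked example of convert_mask_to_box; the exact corner values assume MONAI's TO_REMOVE constant is 0.0:

import numpy as np

# one 2x2 foreground block labeled 5 on a bg_label == -1 background
mask = np.full((1, 4, 4), -1, dtype=np.int16)
mask[0, 1:3, 1:3] = 5
boxes, labels = convert_mask_to_box(mask, bg_label=-1)
print(boxes)   # [[1. 1. 3. 3.]] -- StandardMode (min corner, max corner)
print(labels)  # [5]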
Example #3
def batched_nms(
    boxes: NdarrayOrTensor,
    scores: NdarrayOrTensor,
    labels: NdarrayOrTensor,
    nms_thresh: float,
    max_proposals: int = -1,
    box_overlap_metric: Callable = box_iou,
) -> NdarrayOrTensor:
    """
    Performs non-maximum suppression in a batched fashion.
    Each label value corresponds to a category, and NMS will not be applied between elements of different categories.

    Adapted from https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/core/boxes/nms.py

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        scores: prediction scores of the boxes, sized (N,). This function keeps boxes with higher scores.
        labels: indices of the categories for each one of the boxes, sized (N,), value range is (0, num_classes)
        nms_thresh: threshold of NMS. Discards all overlapping boxes with box_overlap > nms_thresh.
        max_proposals: maximum number of boxes it keeps.
            If ``max_proposals`` = -1, there is no limit on the number of boxes that are kept.
        box_overlap_metric: the metric to compute overlap between boxes.

    Returns:
        Indexes of ``boxes`` that are kept after NMS.
    """
    # returns empty array if boxes is empty
    if boxes.shape[0] == 0:
        return convert_to_dst_type(src=np.array([]),
                                   dst=boxes,
                                   dtype=torch.long)[0]

    # convert numpy to tensor if needed
    boxes_t, *_ = convert_data_type(boxes, torch.Tensor, dtype=torch.float32)
    scores_t, *_ = convert_to_dst_type(scores, boxes_t)
    labels_t, *_ = convert_to_dst_type(labels, boxes_t, dtype=torch.long)

    # strategy: in order to perform NMS independently per class,
    # we add an offset to all the boxes. The offset depends
    # only on the class idx, and is large enough so that boxes
    # from different classes do not overlap
    max_coordinate = boxes_t.max()
    offsets = labels_t.to(boxes_t) * (max_coordinate + 1)
    boxes_for_nms = boxes_t + offsets[:, None]
    keep = non_max_suppression(boxes_for_nms, scores_t, nms_thresh,
                               max_proposals, box_overlap_metric)

    # convert tensor back to numpy if needed
    return convert_to_dst_type(src=keep, dst=boxes, dtype=keep.dtype)[0]
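A quick sketch of the per-class behaviour: two heavily overlapping boxes both survive because they carry different labels.

import torch

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 10.0, 10.0]])
scores = torch.tensor([0.9, 0.8])
labels = torch.tensor([0, 1])
print(batched_nms(boxes, scores, labels, nms_thresh=0.5))  # tensor([0, 1])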
Example #4
    def __call__(self,
                 img: NdarrayOrTensor,
                 randomize: bool = True) -> NdarrayOrTensor:
        """
        Apply the transform to `img`; if `randomize` is True, randomize the smooth field, otherwise reuse the previous one.
        """
        img = convert_to_tensor(img, track_meta=get_track_meta())
        if randomize:
            self.randomize()

        if not self._do_transform:
            return img

        img_min = img.min()
        img_max = img.max()
        img_rng = img_max - img_min

        field = self.sfield()
        rfield, *_ = convert_to_dst_type(field, img)

        # everything below here is to be computed using the destination type (numpy, tensor, etc.)

        img = (img - img_min) / (img_rng + 1e-10)  # rescale to unit values
        img = img**rfield  # contrast is changed by raising image data to a power, in this case the field

        out = (img * img_rng
               ) + img_min  # rescale back to the original image value range

        return out
Example #5
    def __call__(self,
                 img: NdarrayOrTensor,
                 randomize: bool = True,
                 device: Optional[torch.device] = None) -> NdarrayOrTensor:
        img = convert_to_tensor(img, track_meta=get_track_meta())
        if randomize:
            self.randomize()

        if not self._do_transform:
            return img

        device = device if device is not None else self.device

        field = self.sfield()

        dgrid = self.grid + field.to(self.grid_dtype)
        dgrid = moveaxis(dgrid, 1, -1)  # type: ignore

        img_t = convert_to_tensor(img[None], torch.float32, device)

        out = grid_sample(
            input=img_t,
            grid=dgrid,
            mode=look_up_option(self.grid_mode, GridSampleMode),
            align_corners=self.grid_align_corners,
            padding_mode=look_up_option(self.grid_padding_mode,
                                        GridSamplePadMode),
        )

        out_t, *_ = convert_to_dst_type(out.squeeze(0), img)

        return out_t
Example #6
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Args:
            img: torch tensor data to extract the contour, with shape: [channels, height, width[, depth]]

        Raises:
            ValueError: When ``image`` ndim is not one of [3, 4].

        Returns:
            A torch tensor with the same shape as img, note:
                1. it's the binary classification result of whether a pixel is edge or not.
                2. in order to keep the original shape of mask image, we use padding as default.
                3. the edge detection is only approximate due to defects inherent in the Laplace kernel;
                   ideally the edge should be thin, but here it has a thickness.

        """
        img = convert_to_tensor(img, track_meta=get_track_meta())
        img_: torch.Tensor = convert_to_tensor(img, track_meta=False)
        spatial_dims = len(img_.shape) - 1
        img_ = img_.unsqueeze(0)  # adds a batch dim
        if spatial_dims == 2:
            kernel = torch.tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]],
                                  dtype=torch.float32)
        elif spatial_dims == 3:
            kernel = -1.0 * torch.ones(3, 3, 3, dtype=torch.float32)
            kernel[1, 1, 1] = 26.0
        else:
            raise ValueError(
                f"{self.__class__} can only handle 2D or 3D images.")
        contour_img = apply_filter(img_, kernel)
        contour_img.clamp_(min=0.0, max=1.0)
        output, *_ = convert_to_dst_type(contour_img.squeeze(0), img)
        return output
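A usage sketch, assuming the call above belongs to monai.transforms.LabelToContour: the Laplace kernel zeroes the interior of a filled region and keeps only its border.

import torch
from monai.transforms import LabelToContour  # assumption: the class shown above

mask = torch.zeros(1, 5, 5)
mask[0, 1:4, 1:4] = 1.0
print(LabelToContour()(mask)[0])  # 1 on the square's border, 0 inside and outside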
Example #7
def get_label_rgb(cmap: str, label: NdarrayOrTensor):
    _cmap = cm.get_cmap(cmap)
    label_np, *_ = convert_data_type(label, np.ndarray)
    label_rgb_np = _cmap(label_np[0])
    label_rgb_np = np.moveaxis(label_rgb_np, -1, 0)[:3]
    label_rgb, *_ = convert_to_dst_type(label_rgb_np, label)
    return label_rgb
Example #8
def percentile(x: NdarrayOrTensor,
               q,
               dim: Optional[int] = None,
               keepdim: bool = False,
               **kwargs) -> Union[NdarrayOrTensor, float, int]:
    """`np.percentile` with equivalent implementation for torch.

    Pytorch uses `quantile`. For more details please refer to:
    https://pytorch.org/docs/stable/generated/torch.quantile.html.
    https://numpy.org/doc/stable/reference/generated/numpy.percentile.html.

    Args:
        x: input data
        q: percentile to compute (should be in range 0 <= q <= 100)
        dim: the dim along which the percentiles are computed. default is to compute the percentile
            along a flattened version of the array.
        keepdim: whether the output data has dim retained or not.
        kwargs: if `x` is numpy array, additional args for `np.percentile`, more details:
            https://numpy.org/doc/stable/reference/generated/numpy.percentile.html.

    Returns:
        Resulting value (scalar)
    """
    if np.isscalar(q):
        if not 0 <= q <= 100:  # type: ignore
            raise ValueError(f"q must be in the range [0, 100], got {q}.")
    elif any(q < 0) or any(q > 100):
        raise ValueError("q values must be in the range [0, 100].")
    result: Union[NdarrayOrTensor, float, int]
    if isinstance(x, np.ndarray):
        result = np.percentile(x, q, axis=dim, keepdims=keepdim, **kwargs)
    else:
        q = convert_to_dst_type(q / 100.0, x)[0]
        result = torch.quantile(x, q, dim=dim, keepdim=keepdim)
    return result
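A parity sketch: numpy and torch inputs give matching results.

import numpy as np
import torch

x_np = np.arange(101, dtype=np.float32)
x_t = torch.arange(101, dtype=torch.float32)
print(percentile(x_np, 90))  # 90.0
print(percentile(x_t, 90))   # tensor(90.)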
Example #9
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Filter the image on the `applied_labels`.

        Args:
            img: Pytorch tensor or numpy array of any shape.

        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.

        Returns:
            Pytorch tensor or numpy array of the same shape as the input.
        """
        if not isinstance(img, (np.ndarray, torch.Tensor)):
            raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")

        if isinstance(img, torch.Tensor):
            if hasattr(torch, "isin"):
                appl_lbls = torch.as_tensor(self.applied_labels, device=img.device)
                return torch.where(torch.isin(img, appl_lbls), img, 0)
            else:
                out = self(img.detach().cpu().numpy())
                out, *_ = convert_to_dst_type(out, img)
                return out
        return np.asarray(np.where(np.isin(img, self.applied_labels), img, 0))
Example #10
def select_labels(labels: Union[Sequence[NdarrayOrTensor], NdarrayOrTensor],
                  keep: NdarrayOrTensor) -> Union[Tuple, NdarrayOrTensor]:
    """
    For each element in `labels`, select the indices specified by `keep`.

    Args:
        labels: Sequence of array. Each element represents classification labels or scores
            corresponding to ``boxes``, sized (N,).
        keep: the indices to keep, with the same length as each element in labels.

    Return:
        selected labels, does not share memory with original labels.
    """
    labels_tuple = ensure_tuple(labels, True)

    labels_select_list = []
    keep_t: torch.Tensor = convert_data_type(keep, torch.Tensor)[0]
    for i in range(len(labels_tuple)):
        labels_t: torch.Tensor = convert_data_type(labels_tuple[i],
                                                   torch.Tensor)[0]
        labels_t = labels_t[keep_t, ...]
        labels_select_list.append(
            convert_to_dst_type(src=labels_t, dst=labels_tuple[i])[0])

    if isinstance(labels, (torch.Tensor, np.ndarray)):
        return labels_select_list[0]  # type: ignore

    return tuple(labels_select_list)
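A short sketch of select_labels with hypothetical data:

import numpy as np

labels = np.array([7, 8, 9])
keep = np.array([0, 2])
print(select_labels(labels, keep))  # [7 9], a copy of the selected entries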
Example #11
def post_convert(
    img: torch.Tensor,
    orig_img: Union[Sequence[NdarrayOrTensor], NdarrayOrTensor],
) -> NdarrayOrTensor:
    orig_img_ = orig_img[0] if isinstance(orig_img, Sequence) else orig_img
    out, *_ = convert_to_dst_type(img, orig_img_)
    return out
Example #12
def convert_box_mode(
    boxes: NdarrayOrTensor,
    src_mode: Union[str, BoxMode, Type[BoxMode], None] = None,
    dst_mode: Union[str, BoxMode, Type[BoxMode], None] = None,
) -> NdarrayOrTensor:
    """
    This function converts the boxes in src_mode to the dst_mode.

    Args:
        boxes: source bounding boxes, Nx4 or Nx6 torch tensor or ndarray.
        src_mode: source box mode. If it is not given, this func will assume it is ``StandardMode()``.
            It follows the same format as ``mode`` in :func:`~monai.data.box_utils.get_boxmode`.
        dst_mode: target box mode. If it is not given, this func will assume it is ``StandardMode()``.
            It follows the same format as ``mode`` in :func:`~monai.data.box_utils.get_boxmode`.

    Returns:
        bounding boxes with target mode, with same data type as ``boxes``, does not share memory with ``boxes``

    Example:
        .. code-block:: python

            boxes = torch.ones(10,4)
            # The following three lines are equivalent
            # They convert boxes with format [xmin, ymin, xmax, ymax] to [xcenter, ycenter, xsize, ysize].
            convert_box_mode(boxes=boxes, src_mode="xyxy", dst_mode="ccwh")
            convert_box_mode(boxes=boxes, src_mode="xyxy", dst_mode=monai.data.box_utils.CenterSizeMode)
            convert_box_mode(boxes=boxes, src_mode="xyxy", dst_mode=monai.data.box_utils.CenterSizeMode())
    """
    src_boxmode = get_boxmode(src_mode)
    dst_boxmode = get_boxmode(dst_mode)

    # if mode not changed, deepcopy the original boxes
    if isinstance(src_boxmode, type(dst_boxmode)):
        return deepcopy(boxes)

    # convert box mode
    # convert numpy to tensor if needed
    boxes_t, *_ = convert_data_type(boxes, torch.Tensor)

    # convert boxes to corners
    corners = src_boxmode.boxes_to_corners(boxes_t)

    # check validity of corners
    spatial_dims = get_spatial_dims(boxes=boxes_t)
    for axis in range(0, spatial_dims):
        if (corners[spatial_dims + axis] < corners[axis]).sum() > 0:
            warnings.warn(
                "The given boxes have invalid values: the box size must be non-negative."
            )

    # convert corners to boxes
    boxes_t_dst = dst_boxmode.corners_to_boxes(corners)

    # convert tensor back to numpy if needed
    boxes_dst, *_ = convert_to_dst_type(src=boxes_t_dst, dst=boxes)
    return boxes_dst
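A concrete check of the docstring example, with numbers:

import torch

boxes = torch.tensor([[0.0, 0.0, 4.0, 2.0]])
print(convert_box_mode(boxes, src_mode="xyxy", dst_mode="ccwh"))
# tensor([[2., 1., 4., 2.]]) -- center (2, 1), size (4, 2)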
Example #13
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Args:
            img: shape must be (C, spatial_dim1[, spatial_dim2, ...]).

        Returns:
            An array with shape (C, spatial_dim1[, spatial_dim2, ...]).
        """
        is_onehot = img.shape[
            0] > 1 if self.is_onehot is None else self.is_onehot
        if self.applied_labels is not None:
            applied_labels = self.applied_labels
        else:
            applied_labels = tuple(get_unique_labels(img, is_onehot,
                                                     discard=0))
        img = convert_to_tensor(img, track_meta=get_track_meta())
        img_: torch.Tensor = convert_to_tensor(img, track_meta=False)
        if self.independent:
            for i in applied_labels:
                foreground = img_[i] > 0 if is_onehot else img_[0] == i
                mask = get_largest_connected_component_mask(
                    foreground, self.connectivity)
                if is_onehot:
                    img_[i][foreground != mask] = 0
                else:
                    img_[0][foreground != mask] = 0
            return convert_to_dst_type(img_, dst=img)[0]
        if not is_onehot:  # not one-hot, union of labels
            labels, *_ = convert_to_dst_type(applied_labels,
                                             dst=img_,
                                             wrap_sequence=True)
            foreground = (img_[..., None] == labels).any(-1)[0]
            mask = get_largest_connected_component_mask(
                foreground, self.connectivity)
            img_[0][foreground != mask] = 0
            return convert_to_dst_type(img_, dst=img)[0]
        # one-hot, union of labels
        foreground = (img_[applied_labels, ...] == 1).any(0)
        mask = get_largest_connected_component_mask(foreground,
                                                    self.connectivity)
        for i in applied_labels:
            img_[i][foreground != mask] = 0
        return convert_to_dst_type(img_, dst=img)[0]
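A usage sketch, assuming the method above belongs to monai.transforms.KeepLargestConnectedComponent:

import torch
from monai.transforms import KeepLargestConnectedComponent  # assumption

seg = torch.tensor([[[1, 0, 1], [0, 0, 1], [0, 1, 1]]])  # two components of label 1
print(KeepLargestConnectedComponent(applied_labels=[1])(seg)[0])
# the isolated top-left voxel is removed; the 4-voxel component remains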
Example #14
    def __call__(self, img: NdarrayOrTensor):
        """
        Args:
            img: PyTorch Tensor data for the TorchVision transform.

        """
        img_t, *_ = convert_data_type(img, torch.Tensor)  # type: ignore
        out = self.trans(img_t)
        out, *_ = convert_to_dst_type(src=out, dst=img)
        return out
Example #15
def apply_affine_to_boxes(boxes: NdarrayOrTensor,
                          affine: NdarrayOrTensor) -> NdarrayOrTensor:
    """
    This function applies affine matrices to the boxes

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be StandardMode
        affine: affine matrix to be applied to the box coordinates, sized (spatial_dims+1,spatial_dims+1)

    Returns:
        returned affine transformed boxes, with same data type as ``boxes``, does not share memory with ``boxes``
    """

    # convert numpy to tensor if needed
    boxes_t, *_ = convert_data_type(boxes, torch.Tensor)

    # some operation does not support torch.float16
    # convert to float32

    boxes_t = boxes_t.to(dtype=COMPUTE_DTYPE)
    affine_t, *_ = convert_to_dst_type(src=affine, dst=boxes_t)

    spatial_dims = get_spatial_dims(boxes=boxes_t)

    # affine-transform the left-top and right-bottom points;
    # they might be flipped, so lt may no longer be the left-top corner
    lt: torch.Tensor = _apply_affine_to_points(boxes_t[:, :spatial_dims],
                                               affine_t,
                                               include_shift=True)
    rb: torch.Tensor = _apply_affine_to_points(boxes_t[:, spatial_dims:],
                                               affine_t,
                                               include_shift=True)

    # make sure lt_new is left top, and rb_new is bottom right
    lt_new, _ = torch.min(torch.stack([lt, rb], dim=2), dim=2)
    rb_new, _ = torch.max(torch.stack([lt, rb], dim=2), dim=2)

    boxes_t_affine = torch.cat([lt_new, rb_new], dim=1)

    # convert tensor back to numpy if needed
    boxes_affine: NdarrayOrTensor
    boxes_affine, *_ = convert_to_dst_type(src=boxes_t_affine, dst=boxes)
    return boxes_affine
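A sketch applying a pure translation via a homogeneous affine:

import torch

boxes = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
affine = torch.tensor([[1.0, 0.0, 10.0], [0.0, 1.0, 20.0], [0.0, 0.0, 1.0]])
print(apply_affine_to_boxes(boxes, affine))  # tensor([[10., 20., 12., 22.]])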
Example #16
def searchsorted(a: NdarrayOrTensor, v: NdarrayOrTensor, right=False, sorter=None):
    side = "right" if right else "left"
    if isinstance(a, np.ndarray):
        return np.searchsorted(a, v, side, sorter)  # type: ignore
    if hasattr(torch, "searchsorted"):
        return torch.searchsorted(a, v, right=right)  # type: ignore
    # if using old PyTorch, will convert to numpy array then compute
    ret = np.searchsorted(a.cpu().numpy(), v.cpu().numpy(), side, sorter)  # type: ignore
    ret, *_ = convert_to_dst_type(ret, a)
    return ret
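A parity sketch for the numpy and torch branches:

import numpy as np
import torch

print(searchsorted(np.array([1, 3, 5]), np.array([2, 5])))          # [1 2]
print(searchsorted(torch.tensor([1, 3, 5]), torch.tensor([2, 5])))  # tensor([1, 2])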
Example #17
def allclose(a: NdarrayTensor,
             b: NdarrayOrTensor,
             rtol=1e-5,
             atol=1e-8,
             equal_nan=False) -> bool:
    """`np.allclose` with equivalent implementation for torch."""
    b, *_ = convert_to_dst_type(b, a)
    if isinstance(a, np.ndarray):
        return np.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    return torch.allclose(a, b, rtol=rtol, atol=atol,
                          equal_nan=equal_nan)  # type: ignore
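A sketch of the mixed-type case: b is first converted to a's type, so numpy and torch inputs can be compared directly.

import numpy as np
import torch

print(allclose(np.array([1.0, 2.0]), torch.tensor([1.0, 2.0])))  # True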
Example #18
def mode(x: NdarrayTensor, dim: int = -1, to_long: bool = True) -> NdarrayTensor:
    """`torch.mode` with equivalent implementation for numpy.

    Args:
        x: array/tensor
        dim: dimension along which to perform `mode` (referred to as `axis` by numpy)
        to_long: convert input to long before performing mode.
    """
    dtype = torch.int64 if to_long else None
    x_t, *_ = convert_data_type(x, torch.Tensor, dtype=dtype)
    o_t = torch.mode(x_t, dim).values
    o, *_ = convert_to_dst_type(o_t, x)
    return o
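A small sketch taking the mode along the last dimension:

import torch

x = torch.tensor([[1, 2, 2], [3, 3, 4]])
print(mode(x))  # tensor([2, 3])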
Example #19
    def post_convert(
        img: torch.Tensor,
        orig_img: Union[Sequence[NdarrayOrTensor], NdarrayOrTensor],
        item_index: int,
    ) -> NdarrayOrTensor:
        if isinstance(orig_img, Sequence):
            if isinstance(orig_img[0], Sequence):
                orig_img_ = orig_img[0][item_index]
            elif isinstance(orig_img[0], (np.ndarray, torch.Tensor)):
                orig_img_ = orig_img[0]
            else:
                orig_img_ = orig_img
        else:
            # fall back to the whole input when it is a single array/tensor
            orig_img_ = orig_img

        out, *_ = convert_to_dst_type(img, orig_img_)
        return out
Example #20
    def test_convert_data_type(self, in_image, im_out):
        converted_im, orig_type, orig_device = convert_to_dst_type(in_image, im_out)
        # check input is unchanged
        self.assertEqual(type(in_image), orig_type)
        if isinstance(in_image, torch.Tensor):
            self.assertEqual(in_image.device, orig_device)
        # check output is desired type
        if isinstance(im_out, torch.Tensor):
            output_type = torch.Tensor
        else:
            output_type = np.ndarray
        self.assertEqual(type(converted_im), output_type)
        # check dtype matches the destination
        if isinstance(im_out, (np.ndarray, torch.Tensor)):
            self.assertEqual(converted_im.dtype, im_out.dtype)
Example #21
def boxes_center_distance(
    boxes1: NdarrayOrTensor,
    boxes2: NdarrayOrTensor,
    euclidean: bool = True
) -> Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]:
    """
    Distance of center points between two sets of boxes

    Args:
        boxes1: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        boxes2: bounding boxes, Mx4 or Mx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        euclidean: if True, compute the Euclidean distance, otherwise use the L1 distance

    Returns:
        - The pairwise distances for every element in boxes1 and boxes2,
          with size of (N,M) and same data type as ``boxes1``.
        - Center points of boxes1, with size of (N,spatial_dims) and same data type as ``boxes1``.
        - Center points of boxes2, with size of (M,spatial_dims) and same data type as ``boxes1``.

    Reference:
        https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/core/boxes/ops.py

    """

    if not isinstance(boxes1, type(boxes2)):
        warnings.warn(
            f"boxes1 is {type(boxes1)}, while boxes2 is {type(boxes2)}. The result will be {type(boxes1)}."
        )

    # convert numpy to tensor if needed
    boxes1_t, *_ = convert_data_type(boxes1, torch.Tensor)
    boxes2_t, *_ = convert_data_type(boxes2, torch.Tensor)

    center1 = box_centers(boxes1_t.to(COMPUTE_DTYPE))  # (N, spatial_dims)
    center2 = box_centers(boxes2_t.to(COMPUTE_DTYPE))  # (M, spatial_dims)

    if euclidean:
        dists = (center1[:, None] - center2[None]).pow(2).sum(-1).sqrt()
    else:
        # before sum: (N, M, spatial_dims)
        dists = (center1[:, None] - center2[None]).abs().sum(-1)

    # convert tensor back to numpy if needed
    (dists, center1, center2), *_ = convert_to_dst_type(src=(dists, center1,
                                                             center2),
                                                        dst=boxes1)
    return dists, center1, center2
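A numeric sketch for two singleton box sets:

import torch

b1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])  # center (1, 1)
b2 = torch.tensor([[2.0, 2.0, 4.0, 4.0]])  # center (3, 3)
dists, c1, c2 = boxes_center_distance(b1, b2)
print(dists)  # tensor([[2.8284]]) -- sqrt(8)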
Example #22
    def __call__(
        self,
        img: NdarrayOrTensor,
        argmax: Optional[bool] = None,
        to_onehot: Optional[int] = None,
        threshold: Optional[float] = None,
        rounding: Optional[str] = None
    ) -> NdarrayOrTensor:
        """
        Args:
            img: the input tensor data to convert; if there is no channel dimension when converting
                to `One-Hot`, it will be added automatically.
            argmax: whether to execute argmax function on input data before transform.
                Defaults to ``self.argmax``.
            to_onehot: if not None, convert input data into the one-hot format with specified number of classes.
                Defaults to ``self.to_onehot``.
            threshold: if not None, threshold the float values to 0 or 1 with the specified threshold value.
                Defaults to ``self.threshold``.
            rounding: if not None, round the data according to the specified option,
                available options: ["torchrounding"].
        """

        img_t: torch.Tensor
        img_t, *_ = convert_data_type(img, torch.Tensor)  # type: ignore
        if argmax or self.argmax:
            img_t = torch.argmax(img_t, dim=self.kwargs.get("dim", 0), keepdim=self.kwargs.get("keepdim", True))

        to_onehot = self.to_onehot if to_onehot is None else to_onehot
        if to_onehot is not None:
            if not isinstance(to_onehot, int):
                raise ValueError("the number of classes for One-Hot must be an integer.")
            img_t = one_hot(
                img_t, num_classes=to_onehot, dim=self.kwargs.get("dim", 0), dtype=self.kwargs.get("dtype", torch.float)
            )

        threshold = self.threshold if threshold is None else threshold
        if threshold is not None:
            img_t = img_t >= threshold

        rounding = self.rounding if rounding is None else rounding
        if rounding is not None:
            look_up_option(rounding, ["torchrounding"])
            img_t = torch.round(img_t)

        img, *_ = convert_to_dst_type(img_t, img, dtype=self.kwargs.get("dtype", torch.float))
        return img
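A usage sketch, assuming the call above belongs to monai.transforms.AsDiscrete:

import torch
from monai.transforms import AsDiscrete  # assumption: the class shown above

logits = torch.tensor([[[0.2, 0.8]], [[0.9, 0.1]]])  # shape (C=2, 1, 2)
print(AsDiscrete(argmax=True)(logits))       # tensor([[[1., 0.]]])
print(AsDiscrete(threshold=0.5)(logits)[0])  # tensor([[0., 1.]])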
Example #23
    def __call__(self, img: NdarrayOrTensor):
        img_np, *_ = convert_data_type(img, np.ndarray)
        img_flat = img_np.flatten()
        try:
            out_flat = np.copy(img_flat).astype(self.dtype)
        except ValueError:
            # can't copy unchanged labels as the expected dtype is not supported, must map all the label values
            out_flat = np.zeros(shape=img_flat.shape, dtype=self.dtype)

        for o, t in zip(self.orig_labels, self.target_labels):
            if o == t:
                continue
            np.place(out_flat, img_flat == o, t)

        out = out_flat.reshape(img_np.shape)
        out, *_ = convert_to_dst_type(src=out, dst=img, dtype=self.dtype)
        return out
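A usage sketch, assuming the call above belongs to monai.transforms.MapLabelValue:

import numpy as np
from monai.transforms import MapLabelValue  # assumption: the class shown above

mapper = MapLabelValue(orig_labels=[0, 1, 2], target_labels=[0, 10, 20])
print(mapper(np.array([0, 1, 2, 1])))  # [ 0. 10. 20. 10.] (default dtype float32)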
Example #24
    def __call__(
        self,
        img: NdarrayOrTensor,
        sigmoid: Optional[bool] = None,
        softmax: Optional[bool] = None,
        other: Optional[Callable] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            sigmoid: whether to execute sigmoid function on model output before transform.
                Defaults to ``self.sigmoid``.
            softmax: whether to execute softmax function on model output before transform.
                Defaults to ``self.softmax``.
            other: callable function to execute other activation layers, for example:
                `other = torch.tanh`. Defaults to ``self.other``.

        Raises:
            ValueError: When ``sigmoid=True`` and ``softmax=True``. Incompatible values.
            TypeError: When ``other`` is not an ``Optional[Callable]``.
            ValueError: When ``self.other=None`` and ``other=None``. Incompatible values.

        """
        if sigmoid and softmax:
            raise ValueError(
                "Incompatible values: sigmoid=True and softmax=True.")
        if other is not None and not callable(other):
            raise TypeError(
                f"other must be None or callable but is {type(other).__name__}."
            )

        # convert to float as activation must operate on float tensor
        img = convert_to_tensor(img, track_meta=get_track_meta())
        img_t, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float)
        if sigmoid or self.sigmoid:
            img_t = torch.sigmoid(img_t)
        if softmax or self.softmax:
            img_t = torch.softmax(img_t, dim=0)

        act_func = self.other if other is None else other
        if act_func is not None:
            img_t = act_func(img_t)
        out, *_ = convert_to_dst_type(img_t, img)
        return out
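A usage sketch, assuming the call above belongs to monai.transforms.Activations:

import torch
from monai.transforms import Activations  # assumption: the class shown above

logits = torch.tensor([[-1.0, 0.0, 1.0]])
print(Activations(sigmoid=True)(logits))  # tensor([[0.2689, 0.5000, 0.7311]])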
Example #25
    def forward(self, x: torch.Tensor, y: torch.Tensor, data_range: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: first sample (e.g., the reference image). Its shape is (B,C,W,H) for 2D data and (B,C,W,H,D) for 3D.
                A fastMRI sample should use the 2D format with C being the number of slices.
            y: second sample (e.g., the reconstructed image). It has the same shape as x.
            data_range: dynamic range of the data

        Returns:
            1-ssim_value (recall this is meant to be a loss function)

        Example:
            .. code-block:: python

                import torch
                x = torch.ones([1,1,10,10])/2
                y = torch.ones([1,1,10,10])/2
                data_range = x.max().unsqueeze(0)
                # the following line should print 1.0 (or 0.9999)
                print(1-SSIMLoss(spatial_dims=2)(x,y,data_range))
        """
        data_range = data_range[(None,) * (self.spatial_dims + 2)]
        # determine whether to work with 2D convolution or 3D
        conv = getattr(F, f"conv{self.spatial_dims}d")
        w = convert_to_dst_type(src=self.w, dst=x)[0]

        c1 = (self.k1 * data_range) ** 2  # stability constant for luminance
        c2 = (self.k2 * data_range) ** 2  # stability constant for contrast
        ux = conv(x, w)  # mu_x
        uy = conv(y, w)  # mu_y
        uxx = conv(x * x, w)  # mu_x^2
        uyy = conv(y * y, w)  # mu_y^2
        uxy = conv(x * y, w)  # mu_xy
        vx = self.cov_norm * (uxx - ux * ux)  # sigma_x
        vy = self.cov_norm * (uyy - uy * uy)  # sigma_y
        vxy = self.cov_norm * (uxy - ux * uy)  # sigma_xy

        numerator = (2 * ux * uy + c1) * (2 * vxy + c2)
        denom = (ux**2 + uy**2 + c1) * (vx + vy + c2)
        ssim_value = numerator / denom
        loss: torch.Tensor = 1 - ssim_value.mean()
        return loss
Example #26
def box_iou(boxes1: NdarrayOrTensor,
            boxes2: NdarrayOrTensor) -> NdarrayOrTensor:
    """
    Compute the intersection over union (IoU) of two sets of boxes.

    Args:
        boxes1: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        boxes2: bounding boxes, Mx4 or Mx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``

    Returns:
        IoU, with size of (N,M) and same data type as ``boxes1``

    """

    if not isinstance(boxes1, type(boxes2)):
        warnings.warn(
            f"boxes1 is {type(boxes1)}, while boxes2 is {type(boxes2)}. The result will be {type(boxes1)}."
        )

    # convert numpy to tensor if needed
    boxes1_t, *_ = convert_data_type(boxes1, torch.Tensor)
    boxes2_t, *_ = convert_data_type(boxes2, torch.Tensor)

    # we do computation with compute_dtype to avoid overflow
    box_dtype = boxes1_t.dtype

    inter, union = _box_inter_union(boxes1_t,
                                    boxes2_t,
                                    compute_dtype=COMPUTE_DTYPE)

    # compute IoU and convert back to original box_dtype
    iou_t = inter / (union + torch.finfo(COMPUTE_DTYPE).eps)  # (N,M)
    iou_t = iou_t.to(dtype=box_dtype)

    # check if NaN or Inf
    if torch.isnan(iou_t).any() or torch.isinf(iou_t).any():
        raise ValueError("Box IoU is NaN or Inf.")

    # convert tensor back to numpy if needed
    iou, *_ = convert_to_dst_type(src=iou_t, dst=boxes1)
    return iou
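A numeric sketch with two half-overlapping boxes:

import torch

b1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
b2 = torch.tensor([[1.0, 0.0, 3.0, 2.0]])
print(box_iou(b1, b2))  # tensor([[0.3333]]) -- intersection 2 / union 6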
Example #27
def fftn_centered(im: NdarrayOrTensor,
                  spatial_dims: int,
                  is_complex: bool = True) -> NdarrayOrTensor:
    """
    Pytorch-based fft for spatial_dims-dim signals. "centered" means this function automatically takes care
    of the required fft and ifft shifts. This function calls monai.networks.blocks.fft_utils_t.fftn_centered_t.
    This is equivalent to doing fft in numpy based on numpy.fft.fftn, numpy.fft.fftshift, and numpy.fft.ifftshift.

    Args:
        im: image that can be
            1) real-valued: the shape is (C,H,W) for 2D spatial inputs and (C,H,W,D) for 3D, or
            2) complex-valued: the shape is (C,H,W,2) for 2D spatial data and (C,H,W,D,2) for 3D. C is the number of channels.
        spatial_dims: number of spatial dimensions (e.g., is 2 for an image, and is 3 for a volume)
        is_complex: if True, then the last dimension of the input im is expected to be 2 (representing real and imaginary channels)

    Returns:
        "out" which is the output kspace (fourier of im)

    Example:

        .. code-block:: python

            import torch
            im = torch.ones(1,3,3,2) # the last dim belongs to real/imaginary parts
            # output1 and output2 will be identical
            output1 = torch.fft.fftn(torch.view_as_complex(torch.fft.ifftshift(im,dim=(-3,-2))), dim=(-2,-1), norm="ortho")
            output1 = torch.fft.fftshift( torch.view_as_real(output1), dim=(-3,-2) )

            output2 = fftn_centered(im, spatial_dims=2, is_complex=True)
    """
    # handle numpy format
    im_t, *_ = convert_data_type(im, torch.Tensor)

    # compute fftn
    out_t = fftn_centered_t(im_t,
                            spatial_dims=spatial_dims,
                            is_complex=is_complex)

    # handle numpy format
    out, *_ = convert_to_dst_type(src=out_t, dst=im)
    return out
Example #28
    def __call__(self,
                 img: NdarrayOrTensor,
                 randomize: bool = True) -> NdarrayOrTensor:
        """
        Apply the transform to `img`; if `randomize` is True, randomize the smooth field, otherwise reuse the previous one.
        """

        if randomize:
            self.randomize()

        if not self._do_transform:
            return img

        field = self.sfield()
        rfield, *_ = convert_to_dst_type(field, img)

        # everything below here is to be computed using the destination type (numpy, tensor, etc.)

        out = img * rfield

        return out
Example #29
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Fill the holes in the provided image.

        Note:
            The value 0 is assumed as background label.

        Args:
            img: Pytorch Tensor or numpy array of shape [C, spatial_dim1[, spatial_dim2, ...]].

        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.

        Returns:
            Pytorch Tensor or numpy array of shape [C, spatial_dim1[, spatial_dim2, ...]].
        """
        img = convert_to_tensor(img, track_meta=get_track_meta())
        img_np, *_ = convert_data_type(img, np.ndarray)
        out_np: np.ndarray = fill_holes(img_np, self.applied_labels,
                                        self.connectivity)
        out, *_ = convert_to_dst_type(out_np, img)
        return out
Example #30
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Fill the holes in the provided image.

        Note:
            The value 0 is assumed as background label.

        Args:
            img: Pytorch Tensor or numpy array of shape [C, spatial_dim1[, spatial_dim2, ...]].

        Raises:
            NotImplementedError: The provided image was not a Pytorch Tensor or numpy array.

        Returns:
            Pytorch Tensor or numpy array of shape [C, spatial_dim1[, spatial_dim2, ...]].
        """
        if not isinstance(img, (np.ndarray, torch.Tensor)):
            raise NotImplementedError(f"{self.__class__} can not handle data of type {type(img)}.")
        img_np: np.ndarray
        img_np, *_ = convert_data_type(img, np.ndarray)  # type: ignore
        out_np: np.ndarray = fill_holes(img_np, self.applied_labels, self.connectivity)
        out, *_ = convert_to_dst_type(out_np, img)
        return out
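A usage sketch, assuming the call above belongs to monai.transforms.FillHoles:

import numpy as np
from monai.transforms import FillHoles  # assumption: the class shown above

ring = np.ones((1, 3, 3), dtype=np.int64)
ring[0, 1, 1] = 0  # a one-pixel hole enclosed by label 1
print(FillHoles()(ring)[0])  # the hole is filled: all ones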