Example #1
 def __init__(
     self,
     keys: KeysCollection,
     output_postfix: str = "discreted",
     argmax: bool = False,
     to_onehot: bool = False,
     n_classes: Optional[int] = None,
     threshold_values: bool = False,
     logit_thresh: float = 0.5,
 ):
     """
     Args:
         keys: keys of the corresponding items to model output and label.
             See also: :py:class:`monai.transforms.compose.MapTransform`
         output_postfix: the postfix string to construct keys to store converted data.
              for example: if the keys of the input data are `pred` and `label` and output_postfix is `discreted`,
             the output data keys will be: `pred_discreted`, `label_discreted`.
             if set to None, will replace the original data with the same key.
         argmax: whether to execute argmax function on input data before transform.
         to_onehot: whether to convert input data into the one-hot format. Defaults to False.
         n_classes: the number of classes to convert to One-Hot format.
          threshold_values: whether to threshold the float values to int 0 or 1, default is False.
         logit_thresh: the threshold value for thresholding operation, default is 0.5.
     """
     super().__init__(keys)
     if output_postfix is not None and not isinstance(output_postfix, str):
         raise ValueError("output_postfix must be a string.")
     self.output_postfix = output_postfix
     self.argmax = ensure_tuple_rep(argmax, len(self.keys))
     self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))
     self.n_classes = ensure_tuple_rep(n_classes, len(self.keys))
     self.threshold_values = ensure_tuple_rep(threshold_values, len(self.keys))
     self.logit_thresh = ensure_tuple_rep(logit_thresh, len(self.keys))
     self.converter = AsDiscrete()
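
Note: every constructor in these examples relies on the same per-key broadcasting, so a short standalone sketch may help. It assumes the usual behaviour of `monai.utils.ensure_tuple_rep`: a scalar is repeated once per key, a sequence must already match the number of keys, and a mismatched length raises a ValueError.

from monai.utils import ensure_tuple_rep

keys = ("pred", "label")

# a scalar argument is repeated for every key
print(ensure_tuple_rep(False, len(keys)))           # (False, False)
print(ensure_tuple_rep(0.5, len(keys)))             # (0.5, 0.5)

# a sequence of matching length is passed through as a tuple
print(ensure_tuple_rep((True, False), len(keys)))   # (True, False)

# a sequence of the wrong length is rejected
try:
    ensure_tuple_rep((True, False, True), len(keys))
except ValueError as exc:
    print(exc)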
Example #2
    def __init__(
        self,
        keys,
        prob=0.1,
        min_zoom=0.9,
        max_zoom=1.1,
        order=3,
        mode="constant",
        cval=0,
        prefilter=True,
        use_gpu=False,
        keep_size=False,
    ):
        super().__init__(keys)
        if hasattr(min_zoom, "__iter__") and hasattr(max_zoom, "__iter__"):
            assert len(min_zoom) == len(max_zoom), "min_zoom and max_zoom must have same length."
        self.min_zoom = min_zoom
        self.max_zoom = max_zoom
        self.prob = prob
        self.use_gpu = use_gpu
        self.keep_size = keep_size

        self.order = ensure_tuple_rep(order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.cval = ensure_tuple_rep(cval, len(self.keys))
        self.prefilter = ensure_tuple_rep(prefilter, len(self.keys))

        self._do_transform = False
        self._zoom = None
Example #3
 def __init__(
     self,
     keys: KeysCollection,
     prefix="Data",
     data_shape=True,
     intensity_range=True,
     data_value=False,
     additional_info=None,
     logger_handler: Optional[Handler] = None,
 ):
     """
     Args:
         keys: keys of the corresponding items to be transformed.
             See also: :py:class:`monai.transforms.compose.MapTransform`
         prefix (string or list of string): will be printed in format: "{prefix} statistics".
         data_shape (bool or list of bool): whether to show the shape of input data.
         intensity_range (bool or list of bool): whether to show the intensity value range of input data.
         data_value (bool or list of bool): whether to show the raw value of input data.
             a typical example is to print some properties of Nifti image: affine, pixdim, etc.
         additional_info (Callable or list of Callable): user can define callable function to extract
             additional info from input data.
         logger_handler (logging.handler): add additional handler to output data: save to file, etc.
             add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html
     """
     super().__init__(keys)
     self.prefix = ensure_tuple_rep(prefix, len(self.keys))
     self.data_shape = ensure_tuple_rep(data_shape, len(self.keys))
      self.intensity_range = ensure_tuple_rep(intensity_range, len(self.keys))
      self.data_value = ensure_tuple_rep(data_value, len(self.keys))
      self.additional_info = ensure_tuple_rep(additional_info, len(self.keys))
     self.logger_handler = logger_handler
     self.printer = DataStats(logger_handler=logger_handler)
Example #4
    def __init__(
        self,
        keys,
        degrees,
        prob=0.1,
        spatial_axes=(0, 1),
        reshape=True,
        order=1,
        mode="constant",
        cval=0,
        prefilter=True,
    ):
        super().__init__(keys)
        self.prob = prob
        self.degrees = degrees
        self.reshape = reshape
        self.spatial_axes = spatial_axes

        self.order = ensure_tuple_rep(order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.cval = ensure_tuple_rep(cval, len(self.keys))
        self.prefilter = ensure_tuple_rep(prefilter, len(self.keys))

        if not hasattr(self.degrees, "__iter__"):
            self.degrees = (-self.degrees, self.degrees)
        assert len(self.degrees) == 2, "degrees should be a number or pair of numbers."

        self._do_transform = False
        self.angle = None
Example #5
    def __init__(
        self,
        keys: KeysCollection,
        range_x=0.0,
        range_y=0.0,
        range_z=0.0,
        prob: float = 0.1,
        keep_size: bool = True,
        interp_order: str = "bilinear",
        mode: str = "border",
        align_corners: bool = False,
    ):
        super().__init__(keys)
        self.range_x = ensure_tuple(range_x)
        if len(self.range_x) == 1:
            self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
        self.range_y = ensure_tuple(range_y)
        if len(self.range_y) == 1:
            self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
        self.range_z = ensure_tuple(range_z)
        if len(self.range_z) == 1:
            self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))

        self.prob = prob
        self.keep_size = keep_size
        self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.align_corners = align_corners

        self._do_transform = False
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
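
Note: the constructor above turns a single number such as `range_x=0.3` into the symmetric interval `(-0.3, 0.3)`, while an explicit pair is kept as given. A minimal plain-Python sketch of that normalization (mirroring the lines above rather than calling the transform itself):

def normalize_range(value):
    # mirror of the range_x / range_y / range_z handling above
    rng = tuple(value) if isinstance(value, (tuple, list)) else (value,)
    if len(rng) == 1:
        rng = tuple(sorted([-rng[0], rng[0]]))
    return rng

print(normalize_range(0.3))         # (-0.3, 0.3)
print(normalize_range((0.1, 0.5)))  # (0.1, 0.5)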
Example #6
    def __init__(
        self,
        keys: KeysCollection,
        argmax: bool = False,
        to_onehot: bool = False,
        n_classes: Optional[int] = None,
        threshold_values: bool = False,
        logit_thresh: float = 0.5,
    ):
        """
        Args:
            keys: keys of the corresponding items to model output and label.
                See also: :py:class:`monai.transforms.compose.MapTransform`
            argmax: whether to execute argmax function on input data before transform.
            to_onehot: whether to convert input data into the one-hot format. Defaults to False.
            n_classes: the number of classes to convert to One-Hot format.
            threshold_values: whether to threshold the float values to int 0 or 1, default is False.
            logit_thresh: the threshold value for thresholding operation, default is 0.5.

        """
        super().__init__(keys)
        self.argmax = ensure_tuple_rep(argmax, len(self.keys))
        self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))
        self.n_classes = ensure_tuple_rep(n_classes, len(self.keys))
        self.threshold_values = ensure_tuple_rep(threshold_values, len(self.keys))
        self.logit_thresh = ensure_tuple_rep(logit_thresh, len(self.keys))
        self.converter = AsDiscrete()
Example #7
    def __init__(self,
                 keys: KeysCollection,
                 output_postfixes,
                 to_onehot=False,
                 num_classes=None):
        """
        Args:
            keys: keys of the corresponding items to be transformed.
                See also: :py:class:`monai.transforms.compose.MapTransform`
            output_postfixes (list, tuple): the postfixes used to construct keys to store the split data.
                for example: if the key of the input data is `pred` and it is split into 2 classes, the output
                data keys will be: pred_(output_postfixes[0]), pred_(output_postfixes[1])
            to_onehot (bool or list of bool): whether to convert the data to One-Hot format, default is False.
            num_classes (int or list of int): the class number used to convert to One-Hot format
                if `to_onehot` is True.

        """
        super().__init__(keys)
        if not isinstance(output_postfixes, (list, tuple)):
            raise ValueError(
                "must specify key postfixes to store splitted data.")
        self.output_postfixes = output_postfixes
        self.to_onehot = ensure_tuple_rep(to_onehot, len(self.keys))
        self.num_classes = ensure_tuple_rep(num_classes, len(self.keys))
        self.splitter = SplitChannel()
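
Note: as the docstring describes, the transform stores one new entry per postfix, named `<key>_<postfix>`. A tiny sketch of that key construction (the postfix names here are illustrative):

key = "pred"
output_postfixes = ["cls0", "cls1"]  # hypothetical postfix names
output_keys = [f"{key}_{postfix}" for postfix in output_postfixes]
print(output_keys)  # ['pred_cls0', 'pred_cls1']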
Example #8
 def __init__(self,
              keys: Hashable,
              output_postfix: str = "act",
              sigmoid=False,
              softmax=False,
              other=None):
     """
     Args:
         keys (hashable items): keys of the corresponding items to model output and label.
             See also: :py:class:`monai.transforms.compose.MapTransform`
         output_postfix (str): the postfix string to construct keys to store converted data.
              for example: if the keys of the input data are `pred` and `label` and output_postfix is `act`,
             the output data keys will be: `pred_act`, `label_act`.
             if set to None, will replace the original data with the same key.
         sigmoid (bool, tuple or list of bool): whether to execute sigmoid function on model
             output before transform.
         softmax (bool, tuple or list of bool): whether to execute softmax function on model
             output before transform.
         other (Callable, tuple or list of Callables): callable function to execute other activation layers,
             for example: `other = lambda x: torch.tanh(x)`
     """
     super().__init__(keys)
     if output_postfix is not None and not isinstance(output_postfix, str):
         raise ValueError("output_postfix must be a string.")
     self.output_postfix = output_postfix
     self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys))
     self.softmax = ensure_tuple_rep(softmax, len(self.keys))
     self.other = ensure_tuple_rep(other, len(self.keys))
     self.converter = Activations()
Example #9
def resize_boxes(
    boxes: NdarrayOrTensor,
    src_spatial_size: Union[Sequence[int], int],
    dst_spatial_size: Union[Sequence[int], int],
):
    """
    Resize boxes when the corresponding image is resized

    Args:
        boxes: source bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        src_spatial_size: source image spatial size.
        dst_spatial_size: target image spatial size.

    Returns:
        resized boxes, with same data type as ``boxes``, does not share memory with ``boxes``

    Example:
        .. code-block:: python

            boxes = torch.ones(1,4)
            src_spatial_size = [100, 100]
            dst_spatial_size = [128, 256]
            resize_boxes(boxes, src_spatial_size, dst_spatial_size) #  will return tensor([[1.28, 2.56, 1.28, 2.56]])
    """
    spatial_dims: int = get_spatial_dims(boxes=boxes)

    src_spatial_size = ensure_tuple_rep(src_spatial_size, spatial_dims)
    dst_spatial_size = ensure_tuple_rep(dst_spatial_size, spatial_dims)

    zoom = [
        dst_spatial_size[axis] / float(src_spatial_size[axis])
        for axis in range(spatial_dims)
    ]

    return zoom_boxes(boxes=boxes, zoom=zoom)
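
Note: the zoom factor is simply `dst / src` per spatial axis, and for ``StandardMode`` boxes (assumed here to store corners as ``[xmin, ymin, xmax, ymax]``) each coordinate is scaled by the zoom of its axis. A plain-numpy sketch reproducing the docstring example:

import numpy as np

boxes = np.ones((1, 4), dtype=np.float32)  # [xmin, ymin, xmax, ymax]
src_spatial_size = [100, 100]
dst_spatial_size = [128, 256]

zoom = np.array([d / float(s) for d, s in zip(dst_spatial_size, src_spatial_size)])  # [1.28, 2.56]
resized = boxes * np.concatenate([zoom, zoom])  # scale the min and max corners per axis
print(resized)  # [[1.28 2.56 1.28 2.56]]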
Example #10
File: dictionary.py, Project: xidchen/MONAI
    def __init__(
        self,
        keys: Hashable,
        prob: float = 0.1,
        min_zoom=0.9,
        max_zoom=1.1,
        interp_order=InterpolationCode.SPLINE3,
        mode="constant",
        cval=0,
        prefilter=True,
        use_gpu: bool = False,
        keep_size: bool = True,
    ):
        super().__init__(keys)
        if hasattr(min_zoom, "__iter__") and hasattr(max_zoom, "__iter__"):
            assert len(min_zoom) == len(max_zoom), "min_zoom and max_zoom must have same length."
        self.min_zoom = min_zoom
        self.max_zoom = max_zoom
        self.prob = prob
        self.use_gpu = use_gpu
        self.keep_size = keep_size

        self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.cval = ensure_tuple_rep(cval, len(self.keys))
        self.prefilter = ensure_tuple_rep(prefilter, len(self.keys))

        self._do_transform = False
        self._zoom = None
Example #11
 def __init__(
     self,
     keys,
     spatial_size,
     sigma_range,
     magnitude_range,
     prob=0.1,
     rotate_range=None,
     shear_range=None,
     translate_range=None,
     scale_range=None,
     mode="bilinear",
     padding_mode="zeros",
     as_tensor_output=False,
     device=None,
 ):
     """
     Args:
         keys (Hashable items): keys of the corresponding items to be transformed.
         spatial_size (3 ints): specifying output image spatial size [h, w, d].
         sigma_range (2 ints): a Gaussian kernel with standard deviation sampled
              from ``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid.
         magnitude_range (2 ints): the random offsets on the grid will be generated from
             ``uniform[magnitude[0], magnitude[1])``.
         prob (float): probability of returning a randomized affine grid.
             defaults to 0.1, with 10% chance returns a randomized grid,
             otherwise returns a ``spatial_size`` centered area extracted from the input image.
         mode (str or sequence of str): interpolation order.
             Available options are 'nearest', 'bilinear'. Defaults to ``'bilinear'``.
             if mode is a tuple of interpolation mode strings, each string corresponds to a key in ``keys``.
             this is useful to set different modes for different data items.
         padding_mode (str or sequence of str): mode of handling out of range indices.
             Available options are 'zeros', 'border', 'reflection'.  Defaults to ``'zeros'``.
         as_tensor_output (bool): the computation is implemented using pytorch tensors, this option specifies
             whether to convert it back to numpy arrays.
         device (torch.device): device on which the tensor will be allocated.
     See also:
         - :py:class:`RandAffineGrid` for the random affine parameters configurations.
         - :py:class:`Affine` for the affine transformation parameters configurations.
     """
     super().__init__(keys)
     self.rand_3d_elastic = Rand3DElastic(
         sigma_range=sigma_range,
         magnitude_range=magnitude_range,
         prob=prob,
         rotate_range=rotate_range,
         shear_range=shear_range,
         translate_range=translate_range,
         scale_range=scale_range,
         spatial_size=spatial_size,
         as_tensor_output=as_tensor_output,
         device=device,
     )
     self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
     self.mode = ensure_tuple_rep(mode, len(self.keys))
Example #12
    def __init__(
        self,
        keys,
        spatial_size,
        prob=0.1,
        rotate_range=None,
        shear_range=None,
        translate_range=None,
        scale_range=None,
        mode="bilinear",
        padding_mode="zeros",
        as_tensor_output=True,
        device=None,
    ):
        """
        Args:
            keys (Hashable items): keys of the corresponding items to be transformed.
            spatial_size (list or tuple of int): output image spatial size.
                if ``data`` component has two spatial dimensions, ``spatial_size`` should have 2 elements [h, w].
                if ``data`` component has three spatial dimensions, ``spatial_size`` should have 3 elements [h, w, d].
            prob (float): probability of returning a randomized affine grid.
                defaults to 0.1, with 10% chance returns a randomized grid.
            mode (str or sequence of str): interpolation order.
                Available options are 'nearest', 'bilinear'. Defaults to ``'bilinear'``.
                if mode is a tuple of interpolation mode strings, each string corresponds to a key in ``keys``.
                this is useful to set different modes for different data items.
            padding_mode (str or sequence of str): mode of handling out of range indices.
                Available options are 'zeros', 'border', 'reflection'.  Defaults to ``'zeros'``.
            as_tensor_output (bool): the computation is implemented using pytorch tensors, this option specifies
                whether to convert it back to numpy arrays.
            device (torch.device): device on which the tensor will be allocated.

        See also:
            - :py:class:`monai.transforms.compose.MapTransform`
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
        """
        super().__init__(keys)
        self.rand_affine = RandAffine(
            prob=prob,
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            spatial_size=spatial_size,
            as_tensor_output=as_tensor_output,
            device=device,
        )
        self.padding_mode = ensure_tuple_rep(padding_mode, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
Example #13
    def __init__(
        self,
        keys: KeysCollection,
        pixdim,
        diagonal: bool = False,
        interp_order: str = "bilinear",
        mode: str = "border",
        dtype: Optional[np.dtype] = None,
        meta_key_postfix: str = "meta_dict",
    ):
        """
        Args:
            pixdim (sequence of floats): output voxel spacing.
            diagonal: whether to resample the input to have a diagonal affine matrix.
                If True, the input data is resampled to the following affine::

                    np.diag((pixdim_0, pixdim_1, pixdim_2, 1))

                This effectively resets the volume to the world coordinate system (RAS+ in nibabel).
                The original orientation, rotation, shearing are not preserved.

                If False, the axes orientation, orthogonal rotation and
                translations components from the original affine will be
                preserved in the target affine. This option will not flip/swap
                axes against the original ones.
            interp_order (`nearest|bilinear` or a sequence of str): str: the same interpolation order
                for all data indexed by `self.keys`; sequence of str, should
                correspond to an interpolation order for each data item indexed
                by `self.keys` respectively. Defaults to `bilinear`.
            mode (str or sequence of str):
                Available options are `zeros|border|reflection`.
                The mode parameter determines how the input array is extended beyond its boundaries.
                Default is 'border'.
            dtype (None or np.dtype or sequence of np.dtype): output array data type.
                Defaults to None to use input data's dtype.
            meta_key_postfix: use `key_{postfix}` to fetch the meta data according to the key data,
                default is `meta_dict`, the meta data is a dictionary object.
                For example, to handle key `image`,  read/write affine matrices from the
                metadata `image_meta_dict` dictionary's `affine` field.
        """
        super().__init__(keys)
        self.spacing_transform = Spacing(pixdim, diagonal=diagonal)
        self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.dtype = ensure_tuple_rep(dtype, len(self.keys))
        if not isinstance(meta_key_postfix, str):
            raise ValueError("meta_key_postfix must be a string.")
        self.meta_key_postfix = meta_key_postfix
Example #14
def flip_boxes(
    boxes: NdarrayOrTensor,
    spatial_size: Union[Sequence[int], int],
    flip_axes: Optional[Union[Sequence[int], int]] = None,
) -> NdarrayOrTensor:
    """
    Flip boxes when the corresponding image is flipped

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        spatial_size: image spatial size.
        flip_axes: spatial axes along which to flip over. Default is None.
            The default `axis=None` will flip over all of the axes of the input array.
            If axis is negative it counts from the last to the first axis.
            If axis is a tuple of ints, flipping is performed on all of the axes
            specified in the tuple.

    Returns:
        flipped boxes, with same data type as ``boxes``, does not share memory with ``boxes``
    """
    spatial_dims: int = get_spatial_dims(boxes=boxes)
    spatial_size = ensure_tuple_rep(spatial_size, spatial_dims)
    if flip_axes is None:
        flip_axes = tuple(range(0, spatial_dims))
    flip_axes = ensure_tuple(flip_axes)

    # flip box
    _flip_boxes = deepcopy(boxes)
    for axis in flip_axes:
        _flip_boxes[:, axis + spatial_dims] = spatial_size[axis] - boxes[:, axis] - TO_REMOVE
        _flip_boxes[:, axis] = spatial_size[axis] - boxes[:, axis + spatial_dims] - TO_REMOVE

    return _flip_boxes
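
Note: flipping a box along an axis mirrors its min/max coordinates about the image extent along that axis. A plain-numpy sketch of the loop above for a single 2D box, assuming ``TO_REMOVE`` is 0:

import numpy as np

TO_REMOVE = 0.0  # assumed value of the module-level constant
spatial_dims = 2

boxes = np.array([[10.0, 20.0, 30.0, 60.0]])  # [xmin, ymin, xmax, ymax]
spatial_size = (100, 200)

flipped = boxes.copy()
for axis in (0, 1):  # flip over both spatial axes
    flipped[:, axis + spatial_dims] = spatial_size[axis] - boxes[:, axis] - TO_REMOVE
    flipped[:, axis] = spatial_size[axis] - boxes[:, axis + spatial_dims] - TO_REMOVE

print(flipped)  # [[ 70. 140.  90. 180.]]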
Example #15
File: array.py, Project: wentaozhu/MONAI
 def randomize(self, img_size):
     self._size = ensure_tuple_rep(self.roi_size, len(img_size))
     if self.random_size:
         self._size = [self.R.randint(low=self._size[i], high=img_size[i] + 1) for i in range(len(img_size))]
     if self.random_center:
         valid_size = get_valid_patch_size(img_size, self._size)
         self._slices = ensure_tuple(slice(None)) + get_random_patch(img_size, valid_size, self.R)
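
Note: when `random_size` is enabled, each output dimension is drawn uniformly between the requested ROI size and the full image size (inclusive). A small numpy-only sketch of that branch, with illustrative sizes:

import numpy as np

rng = np.random.RandomState(0)  # stand-in for the transform's self.R
img_size = (96, 96, 64)
roi_size = (32, 32, 32)

size = [rng.randint(low=roi_size[i], high=img_size[i] + 1) for i in range(len(img_size))]
print(size)  # three ints, each within [roi_size[i], img_size[i]]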
Example #16
def write_png(data,
              file_name: str,
              output_shape=None,
              interp_order: str = "bicubic",
              scale=None):
    """
    Write numpy data into png files to disk.
    Spatially it supports 2D data: (H, W), (H, W, 3) or (H, W, 4).
    If `scale` is None, expect the input data in `np.uint8` or `np.uint16` type.
    It's based on the Image module in PIL library:
    https://pillow.readthedocs.io/en/stable/reference/Image.html

    Args:
        data (numpy.ndarray): input data to write to file.
        file_name: expected file name that saved on disk.
        output_shape (None or tuple of ints): output image shape.
        interp_order (`nearest|linear|bilinear|bicubic|trilinear|area`):
            the interpolation mode. Default="bicubic".
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        scale (255 or 65535): postprocess data by clipping to [0, 1] and scaling to
            [0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.

    """
    assert isinstance(data, np.ndarray), "input data must be numpy array."
    if len(data.shape) == 3 and data.shape[2] == 1:  # PIL Image can't save an image with 1 channel
        data = data.squeeze(2)
    if output_shape is not None:
        output_shape = ensure_tuple_rep(output_shape, 2)
        align_corners = False if interp_order in ("linear", "bilinear", "bicubic", "trilinear") else None
        xform = Resize(spatial_size=output_shape,
                       interp_order=interp_order,
                       align_corners=align_corners)
        _min, _max = np.min(data), np.max(data)
        if len(data.shape) == 3:
            data = np.moveaxis(data, -1, 0)  # to channel first
            data = xform(data)
            data = np.moveaxis(data, 0, -1)
        else:  # (H, W)
            data = np.expand_dims(data, 0)  # make a channel
            data = xform(data)[0]  # first channel
        if interp_order != "nearest":
            data = np.clip(data, _min, _max)

    if scale is not None:
        data = np.clip(data, 0.0, 1.0)  # the png writer can only scale data in the range [0, 1]
        if scale == np.iinfo(np.uint8).max:
            data = (scale * data).astype(np.uint8)
        elif scale == np.iinfo(np.uint16).max:
            data = (scale * data).astype(np.uint16)
        else:
            raise ValueError(f"unsupported scale value: {scale}.")

    img = Image.fromarray(data)
    img.save(file_name, "PNG")
    return
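
Note: a minimal usage sketch of the function defined above. With `scale=None` the input is expected to already be `np.uint8` (or `np.uint16`), so it is written out unchanged; the file name is illustrative.

import numpy as np

img = (np.random.rand(64, 64) * 255).astype(np.uint8)
write_png(img, "example.png")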
Example #17
    def __init__(self,
                 keys,
                 zoom,
                 order=3,
                 mode="constant",
                 cval=0,
                 prefilter=True,
                 use_gpu=False,
                 keep_size=False):
        super().__init__(keys)
        self.zoomer = Zoom(zoom=zoom, use_gpu=use_gpu, keep_size=keep_size)

        self.order = ensure_tuple_rep(order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.cval = ensure_tuple_rep(cval, len(self.keys))
        self.prefilter = ensure_tuple_rep(prefilter, len(self.keys))
Example #18
    def __init__(
        self,
        keys: KeysCollection,
        angle,
        keep_size: bool = True,
        interp_order: str = "bilinear",
        mode: str = "border",
        align_corners: bool = False,
    ):
        super().__init__(keys)
        self.rotator = Rotate(angle=angle,
                              keep_size=keep_size,
                              align_corners=align_corners)

        self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
Example #19
File: dictionary.py, Project: htudu/MONAI
    def __init__(
        self,
        keys: Hashable,
        pixdim,
        diagonal: bool = False,
        interp_order=3,
        mode="nearest",
        cval=0,
        dtype: Optional[np.dtype] = None,
        meta_key_format: str = "{}.{}",
    ):
        """
        Args:
            pixdim (sequence of floats): output voxel spacing.
            diagonal (bool): whether to resample the input to have a diagonal affine matrix.
                If True, the input data is resampled to the following affine::

                    np.diag((pixdim_0, pixdim_1, pixdim_2, 1))

                This effectively resets the volume to the world coordinate system (RAS+ in nibabel).
                The original orientation, rotation, shearing are not preserved.

                If False, the axes orientation, orthogonal rotation and
                translations components from the original affine will be
                preserved in the target affine. This option will not flip/swap
                axes against the original ones.
            interp_order (int or sequence of ints): int: the same interpolation order
                for all data indexed by `self.keys`; sequence of ints, should
                correspond to an interpolation order for each data item indexed
                by `self.keys` respectively.
            mode (str or sequence of str):
                Available options are `reflect|constant|nearest|mirror|wrap`.
                The mode parameter determines how the input array is extended beyond its boundaries.
                Default is 'nearest'.
            cval (scalar or sequence of scalars): Value to fill past edges of input if mode is "constant". Default is 0.0.
            dtype (None or np.dtype or sequence of np.dtype): output array data type, defaults to None to use input data's dtype.
            meta_key_format (str): key format to read/write affine matrices to the data dictionary.
        """
        super().__init__(keys)
        self.spacing_transform = Spacing(pixdim, diagonal=diagonal)
        self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.cval = ensure_tuple_rep(cval, len(self.keys))
        self.dtype = ensure_tuple_rep(dtype, len(self.keys))
        self.meta_key_format = meta_key_format
Example #20
 def __init__(self,
              keys: KeysCollection,
              spatial_size,
              interp_order: str = "area",
              align_corners: Optional[bool] = None):
     super().__init__(keys)
     self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
     self.resizer = Resize(spatial_size=spatial_size,
                           align_corners=align_corners)
Example #21
def rot90_boxes(boxes: NdarrayOrTensor,
                spatial_size: Union[Sequence[int], int],
                k: int = 1,
                axes: Tuple[int, int] = (0, 1)):
    """
    Rotate boxes by 90 degrees in the plane specified by axes.
    Rotation direction is from the first towards the second axis.

    Args:
        boxes: bounding boxes, Nx4 or Nx6 torch tensor or ndarray. The box mode is assumed to be ``StandardMode``
        spatial_size: image spatial size.
        k: number of times the array is rotated by 90 degrees.
        axes: (2,) array_like
            The array is rotated in the plane defined by the axes. Axes must be different.

    Returns:
        A rotated view of `boxes`.

    Notes:
        ``rot90_boxes(boxes, spatial_size, k=1, axes=(1,0))``  is the reverse of
        ``rot90_boxes(boxes, spatial_size, k=1, axes=(0,1))``
        ``rot90_boxes(boxes, spatial_size, k=1, axes=(1,0))`` is equivalent to
        ``rot90_boxes(boxes, spatial_size, k=-1, axes=(0,1))``
    """
    spatial_dims: int = get_spatial_dims(boxes=boxes)
    spatial_size_ = list(ensure_tuple_rep(spatial_size, spatial_dims))

    axes = ensure_tuple(axes)  # type: ignore

    if len(axes) != 2:
        raise ValueError("len(axes) must be 2.")

    if axes[0] == axes[1] or abs(axes[0] - axes[1]) == spatial_dims:
        raise ValueError("Axes must be different.")

    if axes[0] >= spatial_dims or axes[0] < -spatial_dims or axes[1] >= spatial_dims or axes[1] < -spatial_dims:
        raise ValueError(f"Axes={axes} out of range for array of ndim={spatial_dims}.")

    k %= 4

    if k == 0:
        return boxes
    if k == 2:
        return flip_boxes(flip_boxes(boxes, spatial_size_, axes[0]),
                          spatial_size_, axes[1])

    if k == 1:
        boxes_ = flip_boxes(boxes, spatial_size_, axes[1])
        return swapaxes_boxes(boxes_, axes[0], axes[1])
    else:
        # k == 3
        boxes_ = swapaxes_boxes(boxes, axes[0], axes[1])
        spatial_size_[axes[0]], spatial_size_[axes[1]] = spatial_size_[axes[1]], spatial_size_[axes[0]]
        return flip_boxes(boxes_, spatial_size_, axes[1])
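
Note: for `k == 1` the code above flips the boxes over `axes[1]` and then swaps the two axes, mirroring `np.rot90`. A worked plain-numpy sketch of that branch for one 2D box, assuming ``TO_REMOVE`` is 0 and that `swapaxes_boxes` swaps the corresponding coordinate columns:

import numpy as np

box = np.array([[10.0, 20.0, 30.0, 60.0]])  # [xmin, ymin, xmax, ymax]
spatial_size = [100, 200]                   # sizes of axes 0 and 1

# flip over axis 1
flipped = box.copy()
flipped[:, 3] = spatial_size[1] - box[:, 1]  # new ymax
flipped[:, 1] = spatial_size[1] - box[:, 3]  # new ymin
# -> [[10, 140, 30, 180]]

# swap axes 0 and 1 in both corners
rotated = flipped[:, [1, 0, 3, 2]]
print(rotated)  # [[140.  10. 180.  30.]]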
Example #22
File: dictionary.py, Project: hoangpq/MONAI
    def __init__(
        self,
        keys,
        angle,
        spatial_axes=(0, 1),
        reshape=True,
        interp_order=InterpolationCode.LINEAR,
        mode="constant",
        cval=0,
        prefilter=True,
    ):
        super().__init__(keys)
        self.rotator = Rotate(angle=angle, spatial_axes=spatial_axes, reshape=reshape)

        self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.cval = ensure_tuple_rep(cval, len(self.keys))
        self.prefilter = ensure_tuple_rep(prefilter, len(self.keys))
Example #23
File: dictionary.py, Project: hoangpq/MONAI
    def __init__(
        self,
        keys,
        zoom,
        interp_order=InterpolationCode.SPLINE3,
        mode="constant",
        cval=0,
        prefilter=True,
        use_gpu=False,
        keep_size=False,
    ):
        super().__init__(keys)
        self.zoomer = Zoom(zoom=zoom, use_gpu=use_gpu, keep_size=keep_size)

        self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.cval = ensure_tuple_rep(cval, len(self.keys))
        self.prefilter = ensure_tuple_rep(prefilter, len(self.keys))
Example #24
    def __init__(self,
                 keys,
                 angle,
                 spatial_axes=(0, 1),
                 reshape=True,
                 order=1,
                 mode="constant",
                 cval=0,
                 prefilter=True):
        super().__init__(keys)
        self.rotator = Rotate(angle=angle,
                              spatial_axes=spatial_axes,
                              reshape=reshape)

        self.order = ensure_tuple_rep(order, len(self.keys))
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.cval = ensure_tuple_rep(cval, len(self.keys))
        self.prefilter = ensure_tuple_rep(prefilter, len(self.keys))
Example #25
File: array.py, Project: wentaozhu/MONAI
    def __call__(self, img, mode: Optional[str] = None):
        spatial_shape = img.shape[1:]
        k = ensure_tuple_rep(self.k, len(spatial_shape))
        new_size = []
        for k_d, dim in zip(k, spatial_shape):
            new_dim = int(np.ceil(dim / k_d) * k_d) if k_d > 0 else dim
            new_size.append(new_dim)

        spatial_pad = SpatialPad(spatial_size=new_size, method="symmetric", mode=mode or self.mode)
        return spatial_pad(img)
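
Note: the padded size is just each spatial dimension rounded up to the next multiple of `k`, with non-positive `k` leaving the dimension unchanged. A small arithmetic sketch of that rule:

import numpy as np

def padded_size(dim, k):
    # same rounding rule as the loop above
    return int(np.ceil(dim / k) * k) if k > 0 else dim

print(padded_size(30, 16))  # 32
print(padded_size(64, 16))  # 64  (already divisible, unchanged)
print(padded_size(70, 0))   # 70  (k <= 0 preserves the original size)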
Example #26
 def __init__(self, keys: KeysCollection, delay_time=0.0):
     """
     Args:
         keys: keys of the corresponding items to be transformed.
             See also: :py:class:`monai.transforms.compose.MapTransform`
         delay_time(float or list of float): The minimum amount of time, in fractions of seconds,
             to accomplish this identity task. If a list is provided, it must be of length equal
             to the keys representing the delay for each key element.
     """
     super().__init__(keys)
     self.delay_time = ensure_tuple_rep(delay_time, len(self.keys))
     self.delayer = SimulateDelay()
Example #27
    def __init__(self,
                 keys: KeysCollection,
                 sigmoid=False,
                 softmax=False,
                 other=None):
        """
        Args:
            keys: keys of the corresponding items to model output and label.
                See also: :py:class:`monai.transforms.compose.MapTransform`
            sigmoid (bool, tuple or list of bool): whether to execute sigmoid function on model
                output before transform.
            softmax (bool, tuple or list of bool): whether to execute softmax function on model
                output before transform.
            other (Callable, tuple or list of Callables): callable function to execute other activation layers,
                for example: `other = lambda x: torch.tanh(x)`

        """
        super().__init__(keys)
        self.sigmoid = ensure_tuple_rep(sigmoid, len(self.keys))
        self.softmax = ensure_tuple_rep(softmax, len(self.keys))
        self.other = ensure_tuple_rep(other, len(self.keys))
        self.converter = Activations()
Example #28
 def __init__(
     self,
     keys: KeysCollection,
     zoom,
     interp_order: str = "area",
     align_corners: Optional[bool] = None,
     keep_size: bool = True,
 ):
     super().__init__(keys)
     self.zoomer = Zoom(zoom=zoom,
                        align_corners=align_corners,
                        keep_size=keep_size)
     self.interp_order = ensure_tuple_rep(interp_order, len(self.keys))
Example #29
    def __init__(self, keys: KeysCollection, k, mode="constant"):
        """
        Args:
            k (int or sequence of int): the target k for each spatial dimension.
                if `k` is negative or 0, the original size is preserved.
                if `k` is an int, the same `k` is applied to all the input spatial dimensions.
            mode (str or sequence of str): padding mode for SpatialPad.

        See also :py:class:`monai.transforms.SpatialPad`
        """
        super().__init__(keys)
        self.mode = ensure_tuple_rep(mode, len(self.keys))
        self.padder = DivisiblePad(k=k)
Example #30
def write_png(
    data,
    file_name: str,
    output_shape=None,
    interp_order: str = "bicubic",
    scale: bool = False,
    plugin: Optional[str] = None,
    **plugin_args,
):
    """
    Write numpy data into png files to disk.
    Spatially it supports 2D data: (H, W), (H, W, 3) or (H, W, 4).
    It's based on skimage library: https://scikit-image.org/docs/dev/api/skimage

    Args:
        data (numpy.ndarray): input data to write to file.
        file_name: expected file name that saved on disk.
        output_shape (None or tuple of ints): output image shape.
        interp_order (`nearest|linear|bilinear|bicubic|trilinear|area`):
            the interpolation mode. Default="bicubic".
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        scale: whether to postprocess data by clipping to [0, 1] and scaling [0, 255] (uint8).
        plugin: name of plugin to use in `imsave`. By default, the different plugins
            are tried (starting with imageio) until a suitable candidate is found.
        plugin_args (keywords): arguments passed to the given plugin.

    """
    assert isinstance(data, np.ndarray), "input data must be numpy array."

    if output_shape is not None:
        output_shape = ensure_tuple_rep(output_shape, 2)
        xform = Resize(spatial_size=output_shape, interp_order=interp_order)
        _min, _max = np.min(data), np.max(data)
        if len(data.shape) == 3:
            data = np.moveaxis(data, -1, 0)  # to channel first
            data = xform(data)
            data = np.moveaxis(data, 0, -1)
        else:  # (H, W)
            data = np.expand_dims(data, 0)  # make a channel
            data = xform(data)[0]  # first channel
        if interp_order != "nearest":
            data = np.clip(data, _min, _max)

    if scale:
        data = np.clip(data, 0.0, 1.0)  # the png writer can only scale data in the range [0, 1]
        data = 255 * data
    data = data.astype(np.uint8)
    io.imsave(file_name, data, plugin=plugin, **plugin_args)
    return
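
Note: a minimal usage sketch of the skimage-based variant above. With `scale=True` the input is clipped to [0, 1] and rescaled to uint8 before saving; the file name is illustrative.

import numpy as np

img = np.random.rand(64, 64)  # float data in [0, 1]
write_png(img, "example_scaled.png", scale=True)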