def __init__(self, keys, spatial_size, spacing, magnitude_range, prob=0.1, rotate_range=None,
             shear_range=None, translate_range=None, scale_range=None, mode='bilinear',
             padding_mode='zeros', as_tensor_output=False, device=None):
    """
    Args:
        keys (Hashable items): keys of the corresponding items to be transformed.
        spatial_size (2 ints): specifying output image spatial size [h, w].
        spacing (2 ints): distance in between the control points.
        magnitude_range (2 ints): the random offsets will be generated from
            ``uniform[magnitude[0], magnitude[1])``.
        prob (float): probability of returning a randomized affine grid.
            defaults to 0.1, with 10% chance returns a randomized grid,
            otherwise returns a ``spatial_size`` centered area extracted from the input image.
        mode ('nearest'|'bilinear'): interpolation order. Defaults to ``'bilinear'``.
            if mode is a tuple of interpolation mode strings, each string corresponds
            to a key in ``keys``. this is useful to set different modes for different data items.
        padding_mode ('zeros'|'border'|'reflection'): mode of handling out of range indices.
            Defaults to ``'zeros'``.
        as_tensor_output (bool): the computation is implemented using pytorch tensors,
            this option specifies whether to convert it back to numpy arrays.
        device (torch.device): device on which the tensor will be allocated.

    See also:
        - :py:class:`RandAffineGrid` for the random affine parameters configurations.
        - :py:class:`Affine` for the affine transformation parameters configurations.
    """
    MapTransform.__init__(self, keys)
    # The shared elastic transform accepts one mode only; when per-key modes are
    # supplied it is built with the 'bilinear' default and the per-key modes are
    # kept on ``self.mode`` (presumably resolved later per key -- confirm in __call__).
    elastic_mode = mode if not isinstance(mode, (tuple, list)) else 'bilinear'
    self.rand_2d_elastic = Rand2DElastic(
        spacing=spacing,
        magnitude_range=magnitude_range,
        prob=prob,
        rotate_range=rotate_range,
        shear_range=shear_range,
        translate_range=translate_range,
        scale_range=scale_range,
        spatial_size=spatial_size,
        mode=elastic_mode,
        padding_mode=padding_mode,
        as_tensor_output=as_tensor_output,
        device=device,
    )
    self.mode = mode
def __init__(self, keys):
    """
    Initialize by forwarding ``keys`` to the base ``MapTransform``.

    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
            See also: :py:class:`ponai.transforms.compose.MapTransform`
    """
    MapTransform.__init__(self, keys)
def __init__(self, keys, prob=0.1, spatial_axis=None):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        prob (float): probability of applying the transform. Defaults to 0.1.
        spatial_axis: spatial axes argument forwarded unchanged to :py:class:`Flip`.
            Defaults to None.
    """
    MapTransform.__init__(self, keys)
    self.prob = prob
    self.spatial_axis = spatial_axis
    # Per-call decision flag; starts False until randomization runs.
    self._do_transform = False
    # One shared flipper instance reused for every key.
    self.flipper = Flip(spatial_axis=spatial_axis)
def __init__(self, keys, spatial_size, prob=0.1, rotate_range=None, shear_range=None,
             translate_range=None, scale_range=None, mode='bilinear', padding_mode='zeros',
             as_tensor_output=True, device=None):
    """
    Args:
        keys (Hashable items): keys of the corresponding items to be transformed.
        spatial_size (list or tuple of int): output image spatial size.
            if ``data`` component has two spatial dimensions, ``spatial_size`` should
            have 2 elements [h, w]. if ``data`` component has three spatial dimensions,
            ``spatial_size`` should have 3 elements [h, w, d].
        prob (float): probability of returning a randomized affine grid.
            defaults to 0.1, with 10% chance returns a randomized grid.
        mode ('nearest'|'bilinear'): interpolation order. Defaults to ``'bilinear'``.
            if mode is a tuple of interpolation mode strings, each string corresponds
            to a key in ``keys``. this is useful to set different modes for different data items.
        padding_mode ('zeros'|'border'|'reflection'): mode of handling out of range indices.
            Defaults to ``'zeros'``.
        as_tensor_output (bool): the computation is implemented using pytorch tensors,
            this option specifies whether to convert it back to numpy arrays.
        device (torch.device): device on which the tensor will be allocated.

    See also:
        - :py:class:`ponai.transforms.compose.MapTransform`
        - :py:class:`RandAffineGrid` for the random affine parameters configurations.
    """
    MapTransform.__init__(self, keys)
    # The wrapped RandAffine takes a single mode; a tuple/list of per-key modes is
    # stored on ``self.mode`` instead (presumably consumed per key elsewhere -- confirm).
    shared_mode = mode if not isinstance(mode, (tuple, list)) else 'bilinear'
    self.rand_affine = RandAffine(
        prob=prob,
        rotate_range=rotate_range,
        shear_range=shear_range,
        translate_range=translate_range,
        scale_range=scale_range,
        spatial_size=spatial_size,
        mode=shared_mode,
        padding_mode=padding_mode,
        as_tensor_output=as_tensor_output,
        device=device,
    )
    self.mode = mode
def __init__(self, keys, pixdim, diagonal=False, mode='constant', cval=0, interp_order=3,
             dtype=None, meta_key_format='{}.{}'):
    """
    Args:
        pixdim (sequence of floats): output voxel spacing.
        diagonal (bool): whether to resample the input to have a diagonal affine matrix.
            If True, the input data is resampled to the following affine::

                np.diag((pixdim_0, pixdim_1, pixdim_2, 1))

            This effectively resets the volume to the world coordinate system (RAS+ in nibabel).
            The original orientation, rotation, shearing are not preserved.
            If False, the axes orientation, orthogonal rotation and translations components from
            the original affine will be preserved in the target affine. This option will not
            flip/swap axes against the original ones.
        mode (`reflect|constant|nearest|mirror|wrap`): The mode parameter determines how the
            input array is extended beyond its boundaries. Default is 'constant'.
        cval (scalar): Value to fill past edges of input if mode is "constant". Default is 0.0.
        interp_order (int or sequence of ints): int: the same interpolation order for all data
            indexed by `self.keys`; sequence of ints, should correspond to an interpolation
            order for each data item indexed by `self.keys` respectively.
        dtype (None or np.dtype): output array data type, defaults to None to use input data's dtype.
        meta_key_format (str): key format to read/write affine matrices to the data dictionary.

    Raises:
        ValueError: when ``interp_order`` is a sequence whose length is neither 1 nor
            equal to the number of keys.
    """
    MapTransform.__init__(self, keys)
    self.spacing_transform = Spacing(pixdim, diagonal=diagonal, mode=mode, cval=cval, dtype=dtype)
    interp_order = ensure_tuple(interp_order)
    if len(interp_order) == len(self.keys):
        # one interpolation order supplied per key
        self.interp_order = interp_order
    elif len(interp_order) == 1:
        # broadcast a single order to every key
        self.interp_order = interp_order * len(self.keys)
    else:
        # previously `interp_order * len(self.keys)` silently produced a tuple of the
        # wrong length here (e.g. 2 orders for 3 keys -> 6 entries); fail loudly instead.
        raise ValueError(
            'interp_order must have 1 element or one element per key, '
            'got {} for {} keys.'.format(len(interp_order), len(self.keys)))
    self.meta_key_format = meta_key_format
def __init__(self, keys, channel_dim=-1):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
            See also: :py:class:`ponai.transforms.compose.MapTransform`
        channel_dim (int): which dimension of input image is the channel,
            default is the last dimension.
    """
    MapTransform.__init__(self, keys)
    # Single converter instance, shared across all keys.
    self.converter = AsChannelFirst(channel_dim=channel_dim)
def __init__(self, keys, dtype=np.float32):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
            See also: :py:class:`ponai.transforms.compose.MapTransform`
        dtype (np.dtype, optional): if not None convert the loaded image to this data type.
    """
    MapTransform.__init__(self, keys)
    # ``dtype`` is forwarded positionally -- presumably LoadPNG's first parameter
    # is the target dtype; confirm against the LoadPNG definition.
    self.loader = LoadPNG(dtype)
def __init__(self, keys, k=1, spatial_axes=(0, 1)):
    """
    Args:
        k (int): number of times to rotate by 90 degrees.
        spatial_axes (2 ints): defines the plane to rotate with 2 spatial axes.
            Default: (0, 1), this is the first two axis in spatial dimensions.
    """
    MapTransform.__init__(self, keys)
    self.k = k
    self.spatial_axes = spatial_axes
    # Build the rotator once; it is reused for every transformed key.
    self.rotator = Rotate90(k, spatial_axes)
def __init__(self, keys: KeysCollection, dtype: Union[Sequence[np.dtype], np.dtype] = np.float32):
    """
    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`ponai.transforms.compose.MapTransform`
        dtype: convert image to this data type, default is `np.float32`.
            it also can be a sequence of np.dtype, each element corresponds to a key in ``keys``.
    """
    MapTransform.__init__(self, keys)
    # One dtype per key: a scalar dtype is replicated to match ``self.keys``.
    self.dtype = ensure_tuple_rep(dtype, len(self.keys))
    self.converter = CastToType()
def __init__(self, keys, label_key, size, pos=1, neg=1, num_samples=1, image_key=None, image_threshold=0):
    """
    Validate and store the configuration for positive/negative-ratio cropping.

    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        label_key (str): key of the label item used to drive the sampling.
        size (list or tuple of int): spatial size of the crops; every element must be
            a positive integer.
        pos, neg: relative weights; only the ratio ``pos / (pos + neg)`` is kept
            (see ``self.pos_ratio``). Both must be >= 0 and not both 0.
        num_samples (int): number of samples, must be a non-negative integer.
        image_key: optional key of an image item (usage defined elsewhere -- see __call__).
        image_threshold: threshold value stored alongside ``image_key``
            (usage defined elsewhere -- see __call__).
    """
    MapTransform.__init__(self, keys)
    # NOTE(review): input validation via ``assert`` is stripped under ``python -O``;
    # callers relying on these checks should be aware.
    assert isinstance(label_key, str), 'label_key must be a string.'
    assert isinstance(size, (list, tuple)), 'size must be list or tuple.'
    assert all(isinstance(x, int) and x > 0 for x in size), 'all elements of size must be positive integers.'
    assert float(pos) >= 0 and float(neg) >= 0, "pos and neg must be greater than or equal to 0."
    assert float(pos) + float(neg) > 0, "pos and neg cannot both be 0."
    assert isinstance(num_samples, int), \
        "invalid samples number: {}. num_samples must be an integer.".format(num_samples)
    assert num_samples >= 0, 'num_samples must be greater than or equal to 0.'
    self.label_key = label_key
    self.size = size
    # Only the normalized ratio is stored; the preceding asserts guarantee the
    # denominator is strictly positive.
    self.pos_ratio = float(pos) / (float(pos) + float(neg))
    self.num_samples = num_samples
    self.image_key = image_key
    self.image_threshold = image_threshold
    # Placeholder for the crop centers computed at call time.
    self.centers = None
def __init__(self, keys, as_closest_canonical=False, dtype=np.float32, meta_key_format='{}.{}',
             overwriting_keys=False):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
            See also: :py:class:`ponai.transforms.compose.MapTransform`
        as_closest_canonical (bool): if True, load the image as closest to canonical axis format.
        dtype (np.dtype, optional): if not None convert the loaded image to this data type.
        meta_key_format (str): key format to store meta data of the nifti image.
            it must contain 2 fields for the key of this image and the key of every meta data item.
        overwriting_keys (bool): whether allow to overwrite existing keys of meta data.
            default is False, which will raise exception if encountering existing key.
    """
    MapTransform.__init__(self, keys)
    # Second positional argument is hard-coded False -- presumably LoadNifti's
    # "image only" switch, so meta data is returned too; confirm against LoadNifti.
    self.loader = LoadNifti(as_closest_canonical, False, dtype)
    self.meta_key_format = meta_key_format
    self.overwriting_keys = overwriting_keys
def __init__(self, keys, degrees, prob=0.1, spatial_axes=(0, 1), reshape=True, order=1,
             mode='constant', cval=0, prefilter=True):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        degrees: rotation range; a scalar ``d`` is expanded to ``(-d, d)``,
            otherwise it must be a pair of numbers.
        prob (float): probability of applying the transform. Defaults to 0.1.
        spatial_axes (2 ints): plane of rotation. Defaults to (0, 1).
        reshape, order, mode, cval, prefilter: stored for use by the rotation at
            call time (semantics defined elsewhere -- see __call__).
    """
    MapTransform.__init__(self, keys)
    # Normalize a scalar angle into a symmetric (min, max) range before storing.
    if not hasattr(degrees, '__iter__'):
        degrees = (-degrees, degrees)
    assert len(degrees) == 2, "degrees should be a number or pair of numbers."
    self.degrees = degrees
    self.prob = prob
    self.reshape = reshape
    self.order = order
    self.mode = mode
    self.cval = cval
    self.prefilter = prefilter
    self.spatial_axes = spatial_axes
    # Per-call randomization state.
    self._do_transform = False
    self.angle = None
def __init__(self, keys, prob=0.1, min_zoom=0.9, max_zoom=1.1, order=3, mode='constant',
             cval=0, prefilter=True, use_gpu=False, keep_size=False):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        prob (float): probability of applying the transform. Defaults to 0.1.
        min_zoom, max_zoom: zoom-factor bounds; when both are sequences they must
            have the same length (paired per dimension).
        order, mode, cval, prefilter, use_gpu, keep_size: stored for use by the
            zoom at call time (semantics defined elsewhere -- see __call__).
    """
    MapTransform.__init__(self, keys)
    # When both bounds are sequences they must pair up element-wise.
    if hasattr(min_zoom, '__iter__') and hasattr(max_zoom, '__iter__'):
        assert len(min_zoom) == len(max_zoom), "min_zoom and max_zoom must have same length."
    self.min_zoom = min_zoom
    self.max_zoom = max_zoom
    self.prob = prob
    self.order = order
    self.mode = mode
    self.cval = cval
    self.prefilter = prefilter
    self.use_gpu = use_gpu
    self.keep_size = keep_size
    # Per-call randomization state.
    self._do_transform = False
    self._zoom = None
def __init__(self, keys, prob=0.1, max_k=3, spatial_axes=(0, 1)):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
            See also: :py:class:`ponai.transforms.compose.MapTransform`
        prob (float): probability of rotating.
            (Default 0.1, with 10% probability it returns a rotated array.)
        max_k (int): number of rotations will be sampled from `np.random.randint(max_k) + 1`.
            (Default 3)
        spatial_axes (2 ints): defines the plane to rotate with 2 spatial axes.
            Default: (0, 1), this is the first two axis in spatial dimensions.
    """
    MapTransform.__init__(self, keys)
    # Clamp the probability into the valid [0, 1] range.
    self.prob = max(0.0, min(1.0, prob))
    self.max_k = max_k
    self.spatial_axes = spatial_axes
    # Per-call randomization state.
    self._do_transform = False
    self._rand_k = 0
def __init__(self, keys, axcodes=None, as_closest_canonical=False,
             labels=tuple(zip('LPI', 'RAS')), meta_key_format='{}.{}'):
    """
    Args:
        axcodes (N elements sequence): for spatial ND input's orientation.
            e.g. axcodes='RAS' represents 3D orientation:
            (Left, Right), (Posterior, Anterior), (Inferior, Superior).
            default orientation labels options are: 'L' and 'R' for the first dimension,
            'P' and 'A' for the second, 'I' and 'S' for the third.
        as_closest_canonical (boo): if True, load the image as closest to canonical axis format.
        labels : optional, None or sequence of (2,) sequences
            (2,) sequences are labels for (beginning, end) of output axis.
            Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``.
        meta_key_format (str): key format to read/write affine matrices to the data dictionary.

    See Also: `nibabel.orientations.ornt2axcodes`.
    """
    MapTransform.__init__(self, keys)
    # One shared Orientation transform, reused for every key.
    self.ornt_transform = Orientation(
        axcodes=axcodes,
        as_closest_canonical=as_closest_canonical,
        labels=labels,
    )
    self.meta_key_format = meta_key_format
def __init__(self, keys, minv=0.0, maxv=1.0, dtype=np.float32):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        minv, maxv: forwarded unchanged to :py:class:`Rescale`
            (presumably the output intensity range -- confirm against Rescale).
        dtype (np.dtype): forwarded to :py:class:`Rescale`. Defaults to np.float32.
    """
    MapTransform.__init__(self, keys)
    self.rescaler = Rescale(minv, maxv, dtype)
def __init__(self, keys, zoom, order=3, mode='constant', cval=0, prefilter=True,
             use_gpu=False, keep_size=False):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.

    All remaining parameters are forwarded unchanged to :py:class:`Zoom`.
    """
    MapTransform.__init__(self, keys)
    self.zoomer = Zoom(
        zoom=zoom,
        order=order,
        mode=mode,
        cval=cval,
        prefilter=prefilter,
        use_gpu=use_gpu,
        keep_size=keep_size,
    )
def __init__(self, keys, angle, spatial_axes=(0, 1), reshape=True, order=1,
             mode='constant', cval=0, prefilter=True):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.

    All remaining parameters are forwarded unchanged to :py:class:`Rotate`.
    """
    MapTransform.__init__(self, keys)
    self.rotator = Rotate(
        angle=angle,
        spatial_axes=spatial_axes,
        reshape=reshape,
        order=order,
        mode=mode,
        cval=cval,
        prefilter=prefilter,
    )
def __init__(self, keys, output_spatial_shape, order=1, mode='reflect', cval=0, clip=True,
             preserve_range=True, anti_aliasing=True, anti_aliasing_sigma=None):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.

    All remaining parameters are forwarded positionally to :py:class:`Resize`.
    """
    MapTransform.__init__(self, keys)
    self.resizer = Resize(output_spatial_shape, order, mode, cval, clip,
                          preserve_range, anti_aliasing, anti_aliasing_sigma)
def __init__(self, keys, spatial_axis=None):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        spatial_axis: forwarded unchanged to :py:class:`Flip`. Defaults to None.
    """
    MapTransform.__init__(self, keys)
    # One shared flipper instance applied to every key.
    self.flipper = Flip(spatial_axis=spatial_axis)
def __init__(self, keys, patch_spatial_size):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        patch_spatial_size (sequence of int): spatial size of the patch. A leading
            ``None`` is prepended internally (presumably for the channel
            dimension -- confirm against the slicing logic in __call__).
    """
    MapTransform.__init__(self, keys)
    self.patch_spatial_size = (None, *patch_spatial_size)
    # Placeholder for the slice objects computed at call time.
    self._slices = None
def __init__(self, keys, a_min, a_max, b_min, b_max, clip=False):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        a_min, a_max, b_min, b_max, clip: forwarded positionally to
            :py:class:`ScaleIntensityRange` (presumably the input/output
            intensity ranges -- confirm against that class).
    """
    MapTransform.__init__(self, keys)
    self.scaler = ScaleIntensityRange(a_min, a_max, b_min, b_max, clip)
def __init__(self, keys, subtrahend=None, divisor=None):
    """
    Args:
        keys (hashable items): keys of the corresponding items to be transformed.
        subtrahend, divisor: forwarded unchanged to :py:class:`NormalizeIntensity`.
            Both default to None.
    """
    MapTransform.__init__(self, keys)
    self.normalizer = NormalizeIntensity(subtrahend, divisor)