Code example #1
File: augmentation3d.py Project: wyli/kornia
    def __init__(self,
                 degrees: Union[torch.Tensor, float, Tuple[float, float,
                                                           float],
                                Tuple[Tuple[float, float], Tuple[float, float],
                                      Tuple[float, float]]],
                 interpolation: Optional[Union[str, int, Resample]] = None,
                 resample: Union[str, int, Resample] = Resample.BILINEAR.name,
                 return_transform: bool = False,
                 same_on_batch: bool = False,
                 align_corners: bool = False,
                 p: float = 0.5) -> None:
        super(RandomRotation3D,
              self).__init__(p=p,
                             return_transform=return_transform,
                             same_on_batch=same_on_batch)
        self.degrees = _tuple_range_reader(degrees, 3)
        if interpolation is not None:
            import warnings
            warnings.warn(
                "interpolation is deprecated. Please use resample instead.",
                category=DeprecationWarning)
            # honour the deprecated argument instead of silently discarding it
            resample = interpolation

        self.resample = Resample.get(resample)
        self.align_corners = align_corners
        self.flags: Dict[str, torch.Tensor] = dict(
            resample=torch.tensor(self.resample.value),
            align_corners=torch.tensor(align_corners))
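
A minimal usage sketch (not part of the original listing), assuming the class is exported as kornia.augmentation.RandomRotation3D and that 3D inputs follow the (B, C, D, H, W) layout:

import torch
import kornia.augmentation as K

# rotate a batch of volumes by up to +/-15 degrees around each of the three axes
aug = K.RandomRotation3D(degrees=15.0, resample="bilinear", p=1.0)
volume = torch.rand(2, 1, 8, 32, 32)  # (B, C, D, H, W)
rotated = aug(volume)                 # same shape as the input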
Code example #2
File: augmentation3d.py Project: jhacsonmeza/kornia
 def __init__(
     self,
     kernel_size: Union[int, Tuple[int, int]],
     angle: Union[
         torch.Tensor,
         float,
         Tuple[float, float, float],
         Tuple[Tuple[float, float], Tuple[float, float], Tuple[float, float]],
     ],
     direction: Union[torch.Tensor, float, Tuple[float, float]],
     border_type: Union[int, str, BorderType] = BorderType.CONSTANT.name,
     resample: Union[str, int, Resample] = Resample.NEAREST.name,
     return_transform: bool = False,
     same_on_batch: bool = False,
     p: float = 0.5,
     keepdim: bool = False,
 ) -> None:
     super(RandomMotionBlur3D, self).__init__(
         p=p, return_transform=return_transform, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim
     )
     self._device, self._dtype = _extract_device_dtype([angle, direction])
     self.kernel_size: Union[int, Tuple[int, int]] = kernel_size
     self.angle = angle
     self.direction = direction
     self.resample = Resample.get(resample)
     self.border_type = BorderType.get(border_type)
     self.flags: Dict[str, torch.Tensor] = {
         "border_type": torch.tensor(self.border_type.value),
         "interpolation": torch.tensor(self.resample.value),
     }
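
A hedged usage sketch, assuming the class is exported as kornia.augmentation.RandomMotionBlur3D and that a plain float for angle/direction is expanded to a symmetric sampling range:

import torch
import kornia.augmentation as K

# random 3D motion blur with kernel size 5; angle and direction are sampled per call
blur = K.RandomMotionBlur3D(kernel_size=5, angle=35.0, direction=0.5, p=1.0)
volume = torch.rand(2, 1, 8, 32, 32)  # (B, C, D, H, W)
blurred = blur(volume)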
Code example #3
File: augmentation3d.py Project: jhacsonmeza/kornia
 def __init__(
     self,
     degrees: Union[
         torch.Tensor,
         float,
         Tuple[float, float, float],
         Tuple[Tuple[float, float], Tuple[float, float], Tuple[float, float]],
     ],
     resample: Union[str, int, Resample] = Resample.BILINEAR.name,
     return_transform: bool = False,
     same_on_batch: bool = False,
     align_corners: bool = False,
     p: float = 0.5,
     keepdim: bool = False,
 ) -> None:
     super(RandomRotation3D, self).__init__(
         p=p, return_transform=return_transform, same_on_batch=same_on_batch, keepdim=keepdim
     )
     self._device, self._dtype = _extract_device_dtype([degrees])
     self.degrees = degrees
     self.resample = Resample.get(resample)
     self.align_corners = align_corners
     self.flags: Dict[str, torch.Tensor] = dict(
         resample=torch.tensor(self.resample.value), align_corners=torch.tensor(align_corners)
     )
Code example #4
File: augmentation3d.py Project: jhacsonmeza/kornia
 def __init__(
     self,
     size: Union[int, Tuple[int, int, int]],
     align_corners: bool = True,
     resample: Union[str, int, Resample] = Resample.BILINEAR.name,
     return_transform: bool = False,
     p: float = 1.0,
     keepdim: bool = False,
 ) -> None:
     # same_on_batch is always True for CenterCrop
     # Since PyTorch does not support ragged tensors, cropping is applied to the whole batch at once.
     super(CenterCrop3D, self).__init__(
         p=1.0, return_transform=return_transform, same_on_batch=True, p_batch=p, keepdim=keepdim
     )
     if isinstance(size, tuple):
         self.size = (size[0], size[1], size[2])
     elif isinstance(size, int):
         self.size = (size, size, size)
     else:
         raise Exception(f"Invalid size type. Expected (int, tuple(int, int int). Got: {size}.")
     self.resample = Resample.get(resample)
     self.align_corners = align_corners
     self.flags: Dict[str, torch.Tensor] = dict(
         interpolation=torch.tensor(self.resample.value), align_corners=torch.tensor(align_corners)
     )
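
For reference, a short usage sketch, assuming kornia.augmentation.CenterCrop3D and a (D, H, W) interpretation of size:

import torch
import kornia.augmentation as K

crop = K.CenterCrop3D(size=(4, 16, 16))  # keep the central 4x16x16 region
volume = torch.rand(2, 1, 8, 32, 32)     # (B, C, D, H, W)
out = crop(volume)                       # (2, 1, 4, 16, 16)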
Code example #5
File: functional.py Project: mehdidc/kornia
def apply_crop(input: torch.Tensor,
               params: Dict[str, torch.Tensor]) -> torch.Tensor:
    r"""Apply cropping by src bounding box and dst bounding box.
    Order: top-left, top-right, bottom-right and bottom-left. The coordinates must be in the x, y order.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (B, C, H, W).
        params (Dict[str, torch.Tensor]):
            - params['src']: The applied cropping src matrix :math:`(*, 4, 2)`.
            - params['dst']: The applied cropping dst matrix :math:`(*, 4, 2)`.
            - params['interpolation']: Integer tensor. NEAREST = 0, BILINEAR = 1.
            - params['align_corners']: Boolean tensor.

    Returns:
        torch.Tensor: The cropped input.
    """
    input = _transform_input(input)
    _validate_input_dtype(
        input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])

    resample_mode: str = Resample.get(
        params['interpolation'].item()).name.lower()  # type: ignore
    align_corners: bool = cast(bool, params['align_corners'].item())

    return crop_by_boxes(input,
                         params['src'],
                         params['dst'],
                         resample_mode,
                         align_corners=align_corners)
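
The only non-obvious step above is turning the integer flag back into a mode string. A small round-trip sketch, assuming Resample is importable from kornia.constants with NEAREST = 0 and BILINEAR = 1 as stated in the docstring:

import torch
from kornia.constants import Resample

flag = torch.tensor(Resample.get("bilinear").value)  # what a generator stores in params['interpolation']
mode = Resample.get(int(flag.item())).name.lower()   # -> 'bilinear', the string handed to the warper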
Code example #6
File: affine.py Project: kornia/kornia
 def __init__(
     self,
     degrees: Union[Tensor, float, Tuple[float, float]],
     translate: Optional[Union[Tensor, Tuple[float, float]]] = None,
     scale: Optional[Union[Tensor, Tuple[float, float],
                           Tuple[float, float, float, float]]] = None,
     shear: Optional[Union[Tensor, float, Tuple[float, float]]] = None,
     resample: Union[str, int, Resample] = Resample.BILINEAR.name,
     same_on_batch: bool = False,
     align_corners: bool = False,
     padding_mode: Union[str, int,
                         SamplePadding] = SamplePadding.ZEROS.name,
     p: float = 0.5,
     keepdim: bool = False,
     return_transform: Optional[bool] = None,
 ) -> None:
     super().__init__(p=p,
                      return_transform=return_transform,
                      same_on_batch=same_on_batch,
                      keepdim=keepdim)
     self._param_generator = cast(
         rg.AffineGenerator,
         rg.AffineGenerator(degrees, translate, scale, shear))
     self.flags = dict(resample=Resample.get(resample),
                       padding_mode=SamplePadding.get(padding_mode),
                       align_corners=align_corners)
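
A minimal usage sketch for the constructor above, assuming it is exported as kornia.augmentation.RandomAffine:

import torch
import kornia.augmentation as K

# rotation up to +/-15 degrees and translation up to 10% of the image size
aug = K.RandomAffine(degrees=15.0, translate=(0.1, 0.1), resample="bilinear", p=1.0)
img = torch.rand(4, 3, 64, 64)  # (B, C, H, W)
out = aug(img)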
Code example #7
def random_perspective_generator(
    batch_size: int,
    height: int,
    width: int,
    p: float,
    distortion_scale: float,
    interpolation: Union[str, int, Resample] = Resample.BILINEAR.name,
    same_on_batch: bool = False,
    align_corners: bool = False,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ``perspective`` for a random perspective transform.

    Args:
        batch_size (int): the tensor batch size.
        height (int) : height of the image.
        width (int): width of the image.
        p (float): probability of applying the perspective transform to each image.
        distortion_scale (float): it controls the degree of distortion and ranges from 0 to 1. Default value is 0.5.
        interpolation (int, str or kornia.Resample): Default: Resample.BILINEAR
        same_on_batch (bool): apply the same transformation across the batch. Default: False
        align_corners (bool): interpolation flag. Default: False. See
            https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate for details.

    Returns:
        params (Dict[str, torch.Tensor])
    """
    params: Dict[str, torch.Tensor] = random_prob_generator(batch_size, p)
    start_points, end_points = (_get_perspective_params(
        batch_size, width, height, distortion_scale, same_on_batch))
    params['start_points'] = start_points
    params['end_points'] = end_points
    params['interpolation'] = torch.tensor(Resample.get(interpolation).value)
    params['align_corners'] = torch.tensor(align_corners)
    return params
Code example #8
File: augmentation3d.py Project: wyli/kornia
 def __init__(self,
              size: Tuple[int, int, int],
              padding: Optional[Union[int, Tuple[int, int, int],
                                      Tuple[int, int, int, int, int,
                                            int]]] = None,
              pad_if_needed: Optional[bool] = False,
              fill: int = 0,
              padding_mode: str = 'constant',
              resample: Union[str, int, Resample] = Resample.BILINEAR.name,
              return_transform: bool = False,
              same_on_batch: bool = False,
              align_corners: bool = True,
              p: float = 1.0) -> None:
     # Since PyTorch does not support ragged tensors, cropping is applied to the whole batch at once.
     super(RandomCrop3D, self).__init__(p=1.,
                                        return_transform=return_transform,
                                        same_on_batch=same_on_batch,
                                        p_batch=p)
     self.size = size
     self.padding = padding
     self.pad_if_needed = pad_if_needed
     self.fill = fill
     self.padding_mode = padding_mode
     self.resample = Resample.get(resample)
     self.same_on_batch = same_on_batch
     self.align_corners = align_corners
     self.flags: Dict[str, torch.Tensor] = dict(
         interpolation=torch.tensor(self.resample.value),
         align_corners=torch.tensor(align_corners))
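
A short usage sketch, assuming kornia.augmentation.RandomCrop3D and the (D, H, W) size convention:

import torch
import kornia.augmentation as K

crop = K.RandomCrop3D(size=(4, 16, 16), p=1.0)
volume = torch.rand(2, 1, 8, 32, 32)  # (B, C, D, H, W)
out = crop(volume)                    # (2, 1, 4, 16, 16), crop location sampled per element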
Code example #9
File: random_generator.py Project: v-prgmr/kornia
def random_perspective_generator(
        batch_size: int,
        height: int,
        width: int,
        p: float,
        distortion_scale: float,
        interpolation: Union[str, int, Resample] = Resample.BILINEAR.name,
        same_on_batch: bool = False) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ``perspective`` for a random perspective transform.

    Args:
        batch_size (int): the tensor batch size.
        height (int) : height of the image.
        width (int): width of the image.
        p (float): probability of applying the perspective transform to each image.
        distortion_scale (float): it controls the degree of distortion and ranges from 0 to 1. Default value is 0.5.
        interpolation (int, str or kornia.Resample): Default: Resample.BILINEAR
        same_on_batch (bool): apply the same transformation across the batch. Default: False

    Returns:
        params (Dict[str, torch.Tensor]): contains 'start_points' and 'end_points', each listing the
        [top-left, top-right, bottom-right, bottom-left] corners of the original and the transformed
        image respectively (coordinates in x, y order), along with the 'interpolation' flag.
    """
    params: Dict[str, torch.Tensor] = random_prob_generator(batch_size, p)
    start_points, end_points = (_get_perspective_params(
        batch_size, width, height, distortion_scale, same_on_batch))
    params['start_points'] = start_points
    params['end_points'] = end_points
    params['interpolation'] = torch.tensor(Resample.get(interpolation).value)
    return params
Code example #10
def random_crop_generator(
        batch_size: int,
        input_size: Tuple[int, int],
        size: Tuple[int, int],
        resize_to: Optional[Tuple[int, int]] = None,
        interpolation: Union[str, int, Resample] = Resample.BILINEAR.name,
        same_on_batch: bool = False,
        align_corners: bool = False) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ```crop``` transformation for crop transform.

    Args:
        batch_size (int): the tensor batch size.
        input_size (tuple): Input image shape, like (h, w).
        size (tuple): Desired size of the crop operation, like (h, w).
        resize_to (tuple): Desired output size of the crop, like (h, w). If None, no resize will be performed.
        interpolation (int, str or kornia.Resample): Default: Resample.BILINEAR
        same_on_batch (bool): apply the same transformation across the batch. Default: False
        align_corners (bool): interpolation flag. Default: False.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
     """
    x_diff = input_size[1] - size[1]
    y_diff = input_size[0] - size[0]

    if x_diff < 0 or y_diff < 0:
        raise ValueError(
            "input_size %s cannot be smaller than crop size %s in any dimension."
            % (str(input_size), str(size)))

    x_start = _adapted_uniform((batch_size, ), 0, x_diff + 1,
                               same_on_batch).long()
    y_start = _adapted_uniform((batch_size, ), 0, y_diff + 1,
                               same_on_batch).long()

    crop = torch.tensor([[
        [0, 0],
        [size[1] - 1, 0],
        [size[1] - 1, size[0] - 1],
        [0, size[0] - 1],
    ]]).repeat(batch_size, 1, 1)

    crop_src = crop.clone()
    crop_src[:, :, 0] += x_start.unsqueeze(dim=0).reshape(batch_size, 1)
    crop_src[:, :, 1] += y_start.unsqueeze(dim=0).reshape(batch_size, 1)

    if resize_to is None:
        crop_dst = crop
    else:
        crop_dst = torch.tensor([[
            [0, 0],
            [resize_to[1] - 1, 0],
            [resize_to[1] - 1, resize_to[0] - 1],
            [0, resize_to[0] - 1],
        ]]).repeat(batch_size, 1, 1)

    return dict(src=crop_src,
                dst=crop_dst,
                interpolation=torch.tensor(Resample.get(interpolation).value),
                align_corners=torch.tensor(align_corners))
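
The corner convention used for the crop boxes above can be checked in isolation; a plain-PyTorch sketch for a 3x4 (h, w) crop, not part of the generator itself:

import torch

size = (3, 4)  # (h, w)
crop = torch.tensor([[
    [0, 0],                      # top-left          (x, y)
    [size[1] - 1, 0],            # top-right
    [size[1] - 1, size[0] - 1],  # bottom-right
    [0, size[0] - 1],            # bottom-left
]])
# tensor([[[0, 0], [3, 0], [3, 2], [0, 2]]]) -- corners are inclusive pixel coordinates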
Code example #11
File: augmentation3d.py Project: shijianjian/kornia
 def __init__(
     self,
     kernel_size: Union[int, Tuple[int, int]],
     angle: Union[
         torch.Tensor,
         float,
         Tuple[float, float, float],
         Tuple[Tuple[float, float], Tuple[float, float], Tuple[float, float]],
     ],
     direction: Union[torch.Tensor, float, Tuple[float, float]],
     border_type: Union[int, str, BorderType] = BorderType.CONSTANT.name,
     resample: Union[str, int, Resample] = Resample.NEAREST.name,
     return_transform: bool = False,
     same_on_batch: bool = False,
     p: float = 0.5,
     keepdim: bool = False,
 ) -> None:
     super().__init__(
         p=p, return_transform=return_transform, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim
     )
     self.flags = dict(
         border_type=BorderType.get(border_type),
         resample=Resample.get(resample),
     )
     self._param_generator = cast(rg.MotionBlurGenerator3D, rg.MotionBlurGenerator3D(kernel_size, angle, direction))
Code example #12
    def __init__(
        self,
        size: Union[int, Tuple[int, int]],
        align_corners: bool = True,
        resample: Union[str, int, Resample] = Resample.BILINEAR.name,
        p: float = 1.0,
        keepdim: bool = False,
        cropping_mode: str = "slice",
        return_transform: Optional[bool] = None,
    ) -> None:
        # same_on_batch is always True for CenterCrop
        # Since PyTorch does not support ragged tensors, cropping is applied to the whole batch at once.
        super().__init__(p=1.0,
                         return_transform=return_transform,
                         same_on_batch=True,
                         p_batch=p,
                         keepdim=keepdim)
        if isinstance(size, tuple):
            self.size = (size[0], size[1])
        elif isinstance(size, int):
            self.size = (size, size)
        else:
            raise Exception(
                f"Invalid size type. Expected (int, tuple(int, int). "
                f"Got: {type(size)}.")

        self.flags = dict(resample=Resample.get(resample),
                          cropping_mode=cropping_mode,
                          align_corners=align_corners,
                          size=self.size)
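
A quick usage sketch, assuming the class is exported as kornia.augmentation.CenterCrop:

import torch
import kornia.augmentation as K

crop = K.CenterCrop(size=24)    # keep the central 24x24 window
img = torch.rand(4, 3, 32, 32)  # (B, C, H, W)
out = crop(img)                 # (4, 3, 24, 24)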
Code example #13
File: functional3d.py Project: zeta1999/kornia
def apply_crop3d(input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]) -> torch.Tensor:
    r"""Apply cropping by src bounding box and dst bounding box.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.
        params (Dict[str, torch.Tensor]):
            - params['src']: The applied cropping src matrix :math:`(*, 8, 3)`.
            - params['dst']: The applied cropping dst matrix :math:`(*, 8, 3)`.
        flags (Dict[str, torch.Tensor]):
            - flags['interpolation']: Integer tensor. NEAREST = 0, BILINEAR = 1.
            - flags['align_corners']: Boolean tensor.

    Returns:
        torch.Tensor: The cropped input.

    Note:
        BBox order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,
        back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.
    """

    resample_mode: str = Resample.get(flags['interpolation'].item()).name.lower()  # type: ignore
    align_corners: bool = cast(bool, flags['align_corners'].item())

    return crop_by_boxes3d(
        input, params['src'], params['dst'], resample_mode, align_corners=align_corners)
Code example #14
def random_rotation_generator(
        batch_size: int,
        degrees: torch.Tensor,
        interpolation: Union[str, int, Resample] = Resample.BILINEAR.name,
        same_on_batch: bool = False,
        align_corners: bool = False) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ``rotate`` for a random rotate transform.

    Args:
        batch_size (int): the tensor batch size.
        degrees (torch.Tensor): range of degrees with shape (2) to select from.
        interpolation (int, str or kornia.Resample): Default: Resample.BILINEAR
        same_on_batch (bool): apply the same transformation across the batch. Default: False
        align_corners (bool): interpolation flag. Default: False.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
    """
    _joint_range_check(degrees, "degrees")

    degrees = _adapted_uniform((batch_size, ), degrees[0], degrees[1],
                               same_on_batch)

    return dict(degrees=degrees,
                interpolation=torch.tensor(Resample.get(interpolation).value),
                align_corners=torch.tensor(align_corners))
Code example #15
 def __init__(
     self,
     size: Tuple[int, int],
     scale: Union[Tensor, Tuple[float, float]] = (0.08, 1.0),
     ratio: Union[Tensor, Tuple[float, float]] = (3.0 / 4.0, 4.0 / 3.0),
     resample: Union[str, int, Resample] = Resample.BILINEAR.name,
     same_on_batch: bool = False,
     align_corners: bool = True,
     p: float = 1.0,
     keepdim: bool = False,
     cropping_mode: str = "slice",
     return_transform: Optional[bool] = None,
 ) -> None:
     # Since PyTorch does not support ragged tensors, cropping is always applied to the whole batch.
     super().__init__(p=1.0,
                      return_transform=return_transform,
                      same_on_batch=same_on_batch,
                      p_batch=p,
                      keepdim=keepdim)
     self._param_generator = cast(
         rg.ResizedCropGenerator,
         rg.ResizedCropGenerator(size, scale, ratio))
     self.flags = dict(size=size,
                       resample=Resample.get(resample),
                       align_corners=align_corners,
                       cropping_mode=cropping_mode)
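
A minimal usage sketch, assuming kornia.augmentation.RandomResizedCrop:

import torch
import kornia.augmentation as K

# crop a random region covering 8%..100% of the area, aspect ratio 3/4..4/3, then resize to 32x32
aug = K.RandomResizedCrop(size=(32, 32), scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), p=1.0)
img = torch.rand(4, 3, 64, 64)
out = aug(img)  # (4, 3, 32, 32)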
Code example #16
def apply_crop3d(input: torch.Tensor, params: Dict[str, torch.Tensor],
                 flags: Dict[str, torch.Tensor]) -> torch.Tensor:
    r"""Apply cropping by src bounding box and dst bounding box.

    Order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,
        back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.
            - params['src']: The applied cropping src matrix :math: `(*, 8, 3)`.
            - params['dst']: The applied cropping dst matrix :math: `(*, 8, 3)`.
        flags (Dict[str, torch.Tensor]):
            - params['interpolation']: Integer tensor. NEAREST = 0, BILINEAR = 1.
            - params['align_corners']: Boolean tensor.

    Returns:
        torch.Tensor: The cropped input.
    """
    input = _transform_input3d(input)
    _validate_input_dtype(
        input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])

    resample_mode: str = Resample.get(
        flags['interpolation'].item()).name.lower()  # type: ignore
    align_corners: bool = cast(bool, flags['align_corners'].item())

    return crop_by_boxes3d(input,
                           params['src'],
                           params['dst'],
                           resample_mode,
                           align_corners=align_corners)
Code example #17
def random_rotation_generator3d(
        batch_size: int,
        degrees: torch.Tensor,
        interpolation: Union[str, int, Resample] = Resample.BILINEAR.name,
        same_on_batch: bool = False,
        align_corners: bool = False) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ``rotate`` for a random rotate transform.

    Args:
        batch_size (int): the tensor batch size.
        degrees (torch.Tensor): Ranges of degrees (3, 2) for yaw, pitch and roll.
        interpolation (int, str or kornia.Resample): Default: Resample.BILINEAR
        same_on_batch (bool): apply the same transformation across the batch. Default: False
        align_corners (bool): interpolation flag. Default: False.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
    """
    assert degrees.shape == torch.Size(
        [3, 2]), f"'degrees' must be the shape of (3, 2). Got {degrees.shape}."
    yaw = _adapted_uniform((batch_size, ), degrees[0][0], degrees[0][1],
                           same_on_batch)
    pitch = _adapted_uniform((batch_size, ), degrees[1][0], degrees[1][1],
                             same_on_batch)
    roll = _adapted_uniform((batch_size, ), degrees[2][0], degrees[2][1],
                            same_on_batch)

    return dict(yaw=yaw,
                pitch=pitch,
                roll=roll,
                interpolation=torch.tensor(Resample.get(interpolation).value),
                align_corners=torch.tensor(align_corners))
Code example #18
 def __init__(
     self,
     size: Tuple[int, int, int],
     padding: Optional[Union[int, Tuple[int, int, int], Tuple[int, int, int, int, int, int]]] = None,
     pad_if_needed: Optional[bool] = False,
     fill: int = 0,
     padding_mode: str = "constant",
     resample: Union[str, int, Resample] = Resample.BILINEAR.name,
     return_transform: Optional[bool] = None,
     same_on_batch: bool = False,
     align_corners: bool = True,
     p: float = 1.0,
     keepdim: bool = False,
 ) -> None:
     # Since PyTorch does not support ragged tensors, cropping is applied to the whole batch at once.
     super().__init__(
         p=1.0, return_transform=return_transform, same_on_batch=same_on_batch, p_batch=p, keepdim=keepdim
     )
     self.flags = dict(
         size=size,
         padding=padding,
         pad_if_needed=pad_if_needed,
         padding_mode=padding_mode,
         fill=fill,
         resample=Resample.get(resample),
         align_corners=align_corners,
     )
     self._param_generator = cast(rg.CropGenerator3D, rg.CropGenerator3D(size, None))
Code example #19
File: functional.py Project: rubaha96/kornia
def apply_crop(input: torch.Tensor,
               params: Dict[str, torch.Tensor],
               return_transform: bool = False) -> UnionType:
    """
    Args:
        params (dict): A dict that must have {'src': torch.Tensor, 'dst': torch.Tensor}. Can be generated from
        kornia.augmentation.random_generator.random_crop_generator
        return_transform (bool): if ``True`` return the matrix describing the transformation applied to each
        input tensor.
    Returns:
        torch.Tensor: The cropped input.
        torch.Tensor: The applied cropping matrix :math:`(*, 4, 2)` if the return_transform flag
        is set to ``True``.
    """
    input = _transform_input(input)
    _validate_input_dtype(
        input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])

    return crop_by_boxes(
        input,
        params['src'],
        params['dst'],
        Resample.get(
            params['interpolation'].item()).name.lower(),  # type: ignore
        return_transform=return_transform)
Code example #20
def random_perspective_generator(
    batch_size: int,
    height: int,
    width: int,
    p: float,
    distortion_scale: torch.Tensor,
    interpolation: Union[str, int, Resample] = Resample.BILINEAR.name,
    same_on_batch: bool = False,
    align_corners: bool = False,
) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ``perspective`` for a random perspective transform.

    Args:
        batch_size (int): the tensor batch size.
        height (int) : height of the image.
        width (int): width of the image.
        p (float): probability of applying the perspective transform to each image.
        distortion_scale (torch.Tensor): it controls the degree of distortion and ranges from 0 to 1.
        interpolation (int, str or kornia.Resample): Default: Resample.BILINEAR
        same_on_batch (bool): apply the same transformation across the batch. Default: False
        align_corners (bool): interpolation flag. Default: False. See
            https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate for details.

    Returns:
        params (Dict[str, torch.Tensor])
    """
    assert distortion_scale.dim() == 0 and 0 <= distortion_scale <= 1, \
        f"'distortion_scale' must be a scalar within [0, 1]. Got {distortion_scale}"

    batch_prob: torch.Tensor = random_prob_generator(batch_size, p, same_on_batch=same_on_batch)['batch_prob']

    start_points: torch.Tensor = torch.tensor([[
        [0., 0],
        [width - 1, 0],
        [width - 1, height - 1],
        [0, height - 1],
    ]]).expand(batch_size, -1, -1)

    # generate random offset not larger than half of the image
    fx = distortion_scale * width / 2
    fy = distortion_scale * height / 2

    factor = torch.tensor([fx, fy]).view(-1, 1, 2)

    rand_val: torch.Tensor = _adapted_uniform((batch_size, 4, 2), 0, 1, same_on_batch)
    pts_norm = torch.tensor([[
        [1, 1],
        [-1, 1],
        [-1, -1],
        [1, -1]
    ]])
    end_points = start_points + factor * rand_val * pts_norm

    return dict(batch_prob=batch_prob,
                start_points=start_points,
                end_points=end_points,
                interpolation=torch.tensor(Resample.get(interpolation).value),
                align_corners=torch.tensor(align_corners))
Code example #21
def _get_random_affine_params(
        batch_size: int,
        height: int,
        width: int,
        degrees: TupleFloat,
        translate: Optional[TupleFloat],
        scales: Optional[TupleFloat],
        shears: Optional[TupleFloat],
        resample: Union[str, int, Resample] = Resample.BILINEAR.name,
        same_on_batch: bool = False,
        align_corners: bool = False) -> Dict[str, torch.Tensor]:
    r"""Get parameters for ```affine``` transformation random affine transform.
    The returned matrix is Bx3x3.

    Returns:
        params Dict[str, torch.Tensor]: parameters to be passed for transformation.
    """
    angle = _adapted_uniform((batch_size, ), degrees[0], degrees[1],
                             same_on_batch)

    # compute tensor ranges
    if scales is not None:
        scale = _adapted_uniform((batch_size, ), scales[0], scales[1],
                                 same_on_batch)
    else:
        scale = torch.ones(batch_size)

    if translate is not None:
        max_dx: float = translate[0] * width
        max_dy: float = translate[1] * height
        translations = torch.stack([
            _adapted_uniform((batch_size, ), -max_dx, max_dx, same_on_batch),
            _adapted_uniform((batch_size, ), -max_dy, max_dy, same_on_batch)
        ],
                                   dim=-1)
    else:
        translations = torch.zeros(batch_size, 2)

    center: torch.Tensor = torch.tensor(
        [width, height], dtype=torch.float32).view(1, 2) / 2. - 0.5
    center = center.expand(batch_size, -1)

    if shears is not None:
        sx = _adapted_uniform((batch_size, ), shears[0], shears[1],
                              same_on_batch)
        sy = _adapted_uniform((batch_size, ), shears[0], shears[1],
                              same_on_batch)
    else:
        sx = sy = torch.tensor([0] * batch_size)

    return dict(translations=translations,
                center=center,
                scale=scale,
                angle=angle,
                sx=sx,
                sy=sy,
                resample=torch.tensor(Resample.get(resample).value),
                align_corners=torch.tensor(align_corners))
Code example #22
File: augmentation.py Project: iimog/kornia
 def __init__(
     self, degrees: FloatUnionType, interpolation: Union[str, int, Resample] = Resample.BILINEAR.name,
     return_transform: bool = False, same_on_batch: bool = False, align_corners: bool = False
 ) -> None:
     super(RandomRotation, self).__init__(return_transform)
     self.degrees = degrees
     self.interpolation: Resample = Resample.get(interpolation)
     self.same_on_batch = same_on_batch
     self.align_corners = align_corners
Code example #23
 def __init__(
     self, degrees: Union[torch.Tensor, float, Tuple[float, float, float],
                          Tuple[Tuple[float, float], Tuple[float, float], Tuple[float, float]]],
     interpolation: Union[str, int, Resample] = Resample.BILINEAR.name,
     return_transform: bool = False, same_on_batch: bool = False, align_corners: bool = False
 ) -> None:
     super(RandomRotation3D, self).__init__(return_transform)
     self.degrees = degrees
     self.interpolation: Resample = Resample.get(interpolation)
     self.same_on_batch = same_on_batch
     self.align_corners = align_corners
Code example #24
 def __init__(self,
              distortion_scale: float = 0.5,
              p: float = 0.5,
              interpolation: Union[str, int,
                                   Resample] = Resample.BILINEAR.name,
              return_transform: bool = False,
              same_on_batch: bool = False) -> None:
     super(RandomPerspective, self).__init__(return_transform)
     self.p: float = p
     self.distortion_scale: float = distortion_scale
     self.interpolation: Resample = Resample.get(interpolation)
     self.same_on_batch = same_on_batch
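
A short usage sketch; it sticks to defaults so it should work with both the older interpolation argument shown here and the newer resample argument (an assumption about the installed version):

import torch
import kornia.augmentation as K

aug = K.RandomPerspective(distortion_scale=0.5, p=1.0)  # corners may move by up to half the image extent
img = torch.rand(4, 3, 64, 64)
out = aug(img)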
Code example #25
File: functional.py Project: manyids2/kornia-1
def random_affine(
    input: torch.Tensor,
    degrees: Union[float, Tuple[float, float]],
    translate: Optional[Tuple[float, float]] = None,
    scale: Optional[Tuple[float, float]] = None,
    shear: Optional[Union[float, Tuple[float, float]]] = None,
    resample: Union[str, int, Resample] = Resample.BILINEAR.name,
    return_transform: bool = False
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    r"""Generate params and apply operation on input tensor.
    See :func:`~kornia.augmentation.random_generator.random_affine_generator` for details.
    See :func:`~kornia.augmentation.functional.apply_affine` for details.
    """
    __deprecation_warning("random_affine", "kornia.augmentation.RandomAffine")
    input = _transform_input(input)
    batch_size, _, height, width = input.size()

    _degrees: torch.Tensor = _range_bound(degrees, 'degrees', 0, (-360, 360))
    _translate: Optional[torch.Tensor] = None
    _scale: Optional[torch.Tensor] = None
    _shear: Optional[torch.Tensor] = None
    if translate is not None:
        _translate = _range_bound(translate,
                                  'translate',
                                  bounds=(0, 1),
                                  check='singular')
    if scale is not None:
        _scale = _range_bound(scale,
                              'scale',
                              bounds=(0, float('inf')),
                              check='singular')
    if shear is not None:
        _shear = cast(torch.Tensor, shear) if isinstance(
            shear, torch.Tensor) else torch.tensor(shear)
        _shear = torch.stack([
            _range_bound(_shear if _shear.dim() == 0 else _shear[:2],
                         'shear-x', 0, (-360, 360)),
            torch.tensor([0, 0]) if _shear.dim() == 0 or len(_shear) == 2 else
            _range_bound(_shear[2:], 'shear-y', 0, (-360, 360))
        ])
    params: Dict[str, torch.Tensor] = rg.random_affine_generator(
        batch_size, height, width, _degrees, _translate, _scale, _shear)
    output = apply_affine(
        input, params, {
            'resample': torch.tensor(Resample.get(resample).value),
            'padding_mode': torch.tensor(0),
            'align_corners': torch.tensor(True)
        })
    if return_transform:
        transform = compute_affine_transformation(input, params)
        return output, transform
    return output
Code example #26
 def __init__(
     self,
     distortion_scale: Union[Tensor, float] = 0.5,
     resample: Union[str, int, Resample] = Resample.BILINEAR.name,
     same_on_batch: bool = False,
     align_corners: bool = False,
     p: float = 0.5,
     keepdim: bool = False,
     return_transform: Optional[bool] = None,
 ) -> None:
     super().__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch, keepdim=keepdim)
     self.flags = dict(resample=Resample.get(resample), align_corners=align_corners)
     self._param_generator = cast(rg.PerspectiveGenerator3D, rg.PerspectiveGenerator3D(distortion_scale))
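
And the 3D variant, as a hedged sketch assuming kornia.augmentation.RandomPerspective3D:

import torch
import kornia.augmentation as K

aug = K.RandomPerspective3D(distortion_scale=0.5, p=1.0)
volume = torch.rand(2, 1, 8, 32, 32)  # (B, C, D, H, W)
out = aug(volume)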
Code example #27
File: augmentation3d.py Project: youtang1993/kornia
 def __init__(self, size: Union[int, Tuple[int, int, int]], align_corners: bool = True,
              resample: Union[str, int, Resample] = Resample.BILINEAR.name,
              return_transform: bool = False, p: float = 1., keepdim: bool = False) -> None:
     # same_on_batch is always True for CenterCrop
     # Since PyTorch does not support ragged tensors, cropping is applied to the whole batch at once.
     super(CenterCrop3D, self).__init__(
         p=1., return_transform=return_transform, same_on_batch=True, p_batch=p, keepdim=keepdim)
     self.size = size
     self.resample = Resample.get(resample)
     self.align_corners = align_corners
     self.flags: Dict[str, torch.Tensor] = dict(
         interpolation=torch.tensor(self.resample.value),
         align_corners=torch.tensor(align_corners)
     )
Code example #28
File: augmentation.py Project: iimog/kornia
 def __init__(
     self, degrees: UnionFloat, translate: Optional[TupleFloat] = None, scale: Optional[TupleFloat] = None,
     shear: Optional[UnionFloat] = None, resample: Union[str, int, Resample] = Resample.BILINEAR.name,
     return_transform: bool = False, same_on_batch: bool = False,
     align_corners: bool = False
 ) -> None:
     super(RandomAffine, self).__init__(return_transform)
     self.degrees = degrees
     self.translate = translate
     self.scale = scale
     self.shear = shear
     self.resample: Resample = Resample.get(resample)
     self.same_on_batch = same_on_batch
     self.align_corners = align_corners
Code example #29
 def __init__(self,
              size: Tuple[int, int],
              scale: Tuple[float, float] = (0.08, 1.0),
              ratio: Tuple[float, float] = (1.75, 4. / 3.),
              interpolation: Union[str, int,
                                   Resample] = Resample.BILINEAR.name,
              return_transform: bool = False,
              same_on_batch: bool = False) -> None:
     super(RandomResizedCrop, self).__init__(return_transform)
     self.size = size
     self.scale = scale
     self.ratio = ratio
     self.interpolation: Resample = Resample.get(interpolation)
     self.same_on_batch = same_on_batch
Code example #30
File: augmentation3d.py Project: youtang1993/kornia
 def __init__(
     self, distortion_scale: Union[torch.Tensor, float] = 0.5,
     resample: Union[str, int, Resample] = Resample.BILINEAR.name,
     return_transform: bool = False, same_on_batch: bool = False,
     align_corners: bool = False, p: float = 0.5, keepdim: bool = False
 ) -> None:
     super(RandomPerspective3D, self).__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch,
                                               keepdim=keepdim)
     self.distortion_scale = cast(torch.Tensor, distortion_scale) \
         if isinstance(distortion_scale, torch.Tensor) else torch.tensor(distortion_scale)
     self.resample = Resample.get(resample)
     self.align_corners = align_corners
     self.flags: Dict[str, torch.Tensor] = dict(
         interpolation=torch.tensor(self.resample.value),
         align_corners=torch.tensor(align_corners)
     )