Ejemplo n.º 1
0
 def __init__(self,
              mode: str = 'sobel',
              order: int = 1,
              normalized: bool = True,
              diagonal: bool = False,
              laplacian: bool = False,
              second_order: bool = False) -> None:
     """Build the stack of convolution kernels used to compute spatial gradients.

     Args:
         mode: derivative modality, e.g. ``'sobel'`` or ``'diff'``.
         order: order of the derivatives for the base kernel.
         normalized: whether to normalize the final kernel stack.
         diagonal: append diagonal Sobel x/y kernels.
         laplacian: append a 3x3 Laplacian kernel.
         second_order: append a second-order kernel and its transpose.
     """
     super(SpatialGradient, self).__init__()
     self.normalized: bool = normalized
     self.order: int = order
     self.mode: str = mode
     # Base first/second derivative kernel for the requested mode/order.
     self.kernel = get_spatial_gradient_kernel2d(mode, order)
     extra_kernels = []
     if diagonal:
         kernel_dx: torch.Tensor = _get_sobel_diag_x_kernel_3x3()
         kernel_dy: torch.Tensor = _get_sobel_diag_y_kernel_3x3()
         extra_kernels += [kernel_dx, kernel_dy]
     if laplacian:
         kernel_lap: torch.Tensor = _get_laplacian_kernel_3x3()
         extra_kernels += [kernel_lap]
     if second_order:
         kernel_2x: torch.Tensor = _get_second_order_kernel_3x3()
         # d2/dy2 is the transpose of the d2/dx2 kernel.
         kernel_2y: torch.Tensor = kernel_2x.transpose(0, 1)
         extra_kernels += [kernel_2x, kernel_2y]
     if extra_kernels:
         # Concatenate the optional kernels below the base kernel stack.
         self.kernel = torch.cat(
             [self.kernel, torch.stack(extra_kernels)], dim=0)
     if self.normalized:
         self.kernel = normalize_kernel2d(self.kernel)
Ejemplo n.º 2
0
 def __init__(self,
              mode: str = 'sobel',
              order: int = 1,
              normalized: bool = True,
              coord: str = "xy",
              device: str = "cpu",
              dtype: torch.dtype = torch.float) -> None:
     """Precompute the gradient kernel, its padding, and its conv layout.

     Args:
         mode: derivative modality, e.g. ``'sobel'`` or ``'diff'``.
         order: order of the derivatives.
         normalized: whether to normalize the kernel.
         coord: coordinate convention passed to the kernel builder.
         device: device to place the kernel on.
         dtype: dtype of the kernel.
     """
     super(SpatialGradient, self).__init__()
     self.normalized: bool = normalized
     self.order: int = order
     self.mode: str = mode
     kernel = get_spatial_gradient_kernel2d(mode, order, coord)
     if self.normalized:
         kernel = normalize_kernel2d(kernel)
     # Pad with "replicate" for spatial dims, but with zeros for channel.
     half_h = kernel.size(1) // 2
     half_w = kernel.size(2) // 2
     self.spatial_pad = [half_h, half_h, half_w, half_w]
     # Move to the target device/dtype, add singleton dims, and flip the
     # kernel so conv3d performs a true convolution (not correlation).
     kernel = kernel.to(device).to(dtype).detach()
     kernel = kernel.unsqueeze(1).unsqueeze(1)
     self.kernel: torch.Tensor = kernel.flip(-3)
Ejemplo n.º 3
0
def spatial_gradient(input: torch.Tensor,
                     mode: str = 'sobel',
                     order: int = 1,
                     normalized: bool = True) -> torch.Tensor:
    r"""Compute the first order image derivative in both x and y using a Sobel
    operator.

    .. image:: _static/img/spatial_gradient.png

    Args:
        input: input image tensor with shape :math:`(B, C, H, W)`.
        mode: derivatives modality, can be: `sobel` or `diff`.
        order: the order of the derivatives.
        normalized: whether the output is normalized.

    Return:
        the derivatives of the input feature map. with shape :math:`(B, C, 2, H, W)`.

    .. note::
       See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
       filtering_edges.html>`__.

    Examples:
        >>> input = torch.rand(1, 3, 4, 4)
        >>> output = spatial_gradient(input)  # 1x3x2x4x4
        >>> output.shape
        torch.Size([1, 3, 2, 4, 4])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")

    if not len(input.shape) == 4:
        raise ValueError(
            f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
    # allocate kernel
    kernel: torch.Tensor = get_spatial_gradient_kernel2d(mode, order)
    if normalized:
        kernel = normalize_kernel2d(kernel)

    # prepare kernel: match input device/dtype, add singleton dims
    b, c, h, w = input.shape
    tmp_kernel: torch.Tensor = kernel.to(input).detach()
    tmp_kernel = tmp_kernel.unsqueeze(1).unsqueeze(1)

    # flip so conv3d performs a true convolution (not cross-correlation)
    kernel_flip: torch.Tensor = tmp_kernel.flip(-3)

    # Pad with "replicate" for spatial dims, but with zeros for channel
    spatial_pad = [
        kernel.size(1) // 2,
        kernel.size(1) // 2,
        kernel.size(2) // 2,
        kernel.size(2) // 2
    ]
    # One output channel per derivative kernel; derive it from the kernel
    # itself (2 for order 1, 3 for order 2) instead of hard-coding, so the
    # function stays correct for any kernel set the builder returns.
    out_channels: int = kernel.size(0)
    padded_inp: torch.Tensor = F.pad(input.reshape(b * c, 1, h, w),
                                     spatial_pad, 'replicate')[:, :, None]

    return F.conv3d(padded_inp, kernel_flip,
                    padding=0).view(b, c, out_channels, h, w)
Ejemplo n.º 4
0
 def __init__(self, order=1, normalized=True):
     """Set up the Sobel spatial-gradient kernel for the given order."""
     super(SpatialGradient, self).__init__()
     self.normalized = normalized
     self.order = order
     self.mode = "sobel"
     kernel = get_spatial_gradient_kernel2d(self.mode, order)
     # Optionally normalize the kernel before storing it.
     self.kernel = normalize_kernel2d(kernel) if self.normalized else kernel
Ejemplo n.º 5
0
def spatial_gradient(input: torch.Tensor,
                     mode: str = 'sobel',
                     order: int = 1,
                     normalized: bool = True) -> torch.Tensor:
    r"""Computes the first order image derivative in both x and y using a Sobel
    operator.

    Args:
        input (torch.Tensor): input image tensor with shape :math:`(B, C, H, W)`.
        mode (str): derivatives modality, can be: `sobel` or `diff`. Default: `sobel`.
        order (int): the order of the derivatives. Default: 1.
        normalized (bool): whether the output is normalized. Default: True.

    Return:
        torch.Tensor: the derivatives of the input feature map. with shape :math:`(B, C, 2, H, W)`.

    Examples:
        >>> input = torch.rand(1, 3, 4, 4)
        >>> output = spatial_gradient(input)  # 1x3x2x4x4
        >>> output.shape
        torch.Size([1, 3, 2, 4, 4])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(input)))

    if not len(input.shape) == 4:
        raise ValueError(
            "Invalid input shape, we expect BxCxHxW. Got: {}".format(
                input.shape))
    # Build the derivative kernel for the requested mode and order.
    deriv_kernel: torch.Tensor = get_spatial_gradient_kernel2d(mode, order)
    if normalized:
        deriv_kernel = normalize_kernel2d(deriv_kernel)

    batch, chans, height, width = input.shape
    # Match the input's device/dtype, add singleton dims, and flip the
    # kernel so conv3d performs a true convolution.
    flipped_kernel: torch.Tensor = (
        deriv_kernel.to(input).detach().unsqueeze(1).unsqueeze(1).flip(-3))

    # Pad with "replicate" for spatial dims, but with zeros for channel.
    pad_h = deriv_kernel.size(1) // 2
    pad_w = deriv_kernel.size(2) // 2
    spatial_pad = [pad_h, pad_h, pad_w, pad_w]
    num_derivs: int = 3 if order == 2 else 2
    flat = input.reshape(batch * chans, 1, height, width)
    padded_inp: torch.Tensor = F.pad(flat, spatial_pad, 'replicate')[:, :, None]

    out = F.conv3d(padded_inp, flipped_kernel, padding=0)
    return out.view(batch, chans, num_derivs, height, width)
Ejemplo n.º 6
0
 def __init__(self,
              mode: str = 'sobel',
              order: int = 1,
              normalized: bool = True) -> None:
     """Store the spatial-gradient kernel for the given mode and order.

     Args:
         mode: derivative modality, e.g. ``'sobel'`` or ``'diff'``.
         order: order of the derivatives.
         normalized: whether to normalize the kernel.
     """
     super(SpatialGradient, self).__init__()
     self.normalized: bool = normalized
     self.order: int = order
     self.mode: str = mode
     kernel = get_spatial_gradient_kernel2d(mode, order)
     # Optionally normalize before storing.
     self.kernel = normalize_kernel2d(kernel) if self.normalized else kernel
Ejemplo n.º 7
0
 def __init__(self, mode: str = 'sobel', order: int = 1) -> None:
     """Cache the spatial-gradient kernel for the given mode and order."""
     super(SpatialGradient, self).__init__()
     self.kernel = get_spatial_gradient_kernel2d(mode, order)