def __init__(self,
             mode: str = 'sobel',
             order: int = 1,
             normalized: bool = True,
             coord: str = "xy",
             device: str = "cpu",
             dtype: torch.dtype = torch.float) -> None:
    """Initialize the spatial gradient module.

    Args:
        mode: derivatives modality, ``'sobel'`` or ``'diff'``.
        order: the order of the derivatives.
        normalized: if True, L1-normalize the kernel.
        coord: coordinate convention forwarded to the kernel factory.
        device: device on which the precomputed kernel is stored.
        dtype: dtype of the precomputed kernel.
    """
    super(SpatialGradient, self).__init__()
    self.normalized: bool = normalized
    self.order: int = order
    self.mode: str = mode

    # Build the base gradient kernel; keep a local so the attribute is
    # annotated exactly once (repeated `self.kernel: torch.Tensor = ...`
    # re-annotations are a no-redef error under strict type checking).
    kernel: torch.Tensor = get_spatial_gradient_kernel2d(mode, order, coord)
    if self.normalized:
        kernel = normalize_kernel2d(kernel)

    # Pad with "replicate" for spatial dims, but with zeros for channel.
    # F.pad ordering: (left, right, top, bottom).
    self.spatial_pad = [
        kernel.size(1) // 2,
        kernel.size(1) // 2,
        kernel.size(2) // 2,
        kernel.size(2) // 2,
    ]

    # Prepare kernel: move to the target device/dtype, detach from autograd,
    # add the two singleton dims expected by conv3d, and flip along the
    # derivative axis so cross-correlation becomes true convolution.
    self.kernel: torch.Tensor = (
        kernel.to(device=device, dtype=dtype)
              .detach()
              .unsqueeze(1)
              .unsqueeze(1)
              .flip(-3)
    )
def laplacian(input: torch.Tensor,
              kernel_size: int,
              border_type: str = 'reflect',
              normalized: bool = True) -> torch.Tensor:
    r"""Creates an operator that returns a tensor using a Laplacian filter.

    The operator smooths the given tensor with a laplacian kernel by
    convolving it to each channel. It supports batched operation.

    Arguments:
        input (torch.Tensor): the input image tensor with shape
            :math:`(B, C, H, W)`.
        kernel_size (int): the size of the kernel.
        border_type (str): the padding mode to be applied before convolving.
            The expected modes are: ``'constant'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
        normalized (bool): if True, L1 norm of the kernel is set to 1.

    Return:
        torch.Tensor: the blurred image with shape :math:`(B, C, H, W)`.

    Examples:
        >>> input = torch.rand(2, 4, 5, 5)
        >>> output = laplacian(input, 3)
        >>> output.shape
        torch.Size([2, 4, 5, 5])
    """
    # Build the (1, kH, kW) laplacian kernel, optionally L1-normalized.
    laplacian_kernel: torch.Tensor = get_laplacian_kernel2d(kernel_size)[None]
    if normalized:
        laplacian_kernel = normalize_kernel2d(laplacian_kernel)
    return kornia.filter2d(input, laplacian_kernel, border_type)
def __init__(self,
             mode: str = 'sobel',
             order: int = 1,
             normalized: bool = True,
             diagonal=False,
             laplacian=False,
             second_order=False) -> None:
    """Initialize the spatial gradient module with optional extra kernels.

    Args:
        mode: derivatives modality, ``'sobel'`` or ``'diff'``.
        order: the order of the derivatives.
        normalized: if True, L1-normalize the final kernel stack.
        diagonal: if True, append diagonal sobel kernels.
        laplacian: if True, append a 3x3 laplacian kernel.
        second_order: if True, append second-order kernels (x and its
            transpose for y).
    """
    super(SpatialGradient, self).__init__()
    self.normalized: bool = normalized
    self.order: int = order
    self.mode: str = mode
    self.kernel = get_spatial_gradient_kernel2d(mode, order)

    # Collect the optional extra 3x3 kernels to append to the base stack.
    extra_kernels = []
    if diagonal:
        extra_kernels += [_get_sobel_diag_x_kernel_3x3(),
                          _get_sobel_diag_y_kernel_3x3()]
    if laplacian:
        extra_kernels.append(_get_laplacian_kernel_3x3())
    if second_order:
        kernel_2x: torch.Tensor = _get_second_order_kernel_3x3()
        # The y kernel is the transpose of the x kernel.
        extra_kernels += [kernel_2x, kernel_2x.transpose(0, 1)]

    # `if extra_kernels:` is the idiomatic emptiness test (was len(...) > 0).
    if extra_kernels:
        self.kernel = torch.cat([self.kernel, torch.stack(extra_kernels)],
                                dim=0)
    if self.normalized:
        self.kernel = normalize_kernel2d(self.kernel)
def spatial_gradient(input: torch.Tensor,
                     mode: str = 'sobel',
                     order: int = 1,
                     normalized: bool = True) -> torch.Tensor:
    r"""Compute the first order image derivative in both x and y using a Sobel operator.

    .. image:: _static/img/spatial_gradient.png

    Args:
        input: input image tensor with shape :math:`(B, C, H, W)`.
        mode: derivatives modality, can be: `sobel` or `diff`.
        order: the order of the derivatives.
        normalized: whether the output is normalized.

    Return:
        the derivatives of the input feature map. with shape :math:`(B, C, 2, H, W)`.

    .. note::
       See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
       filtering_edges.html>`__.

    Examples:
        >>> input = torch.rand(1, 3, 4, 4)
        >>> output = spatial_gradient(input)  # 1x3x2x4x4
        >>> output.shape
        torch.Size([1, 3, 2, 4, 4])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")

    if not len(input.shape) == 4:
        raise ValueError(
            f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")

    # Build the derivative kernel, optionally L1-normalized.
    kernel: torch.Tensor = get_spatial_gradient_kernel2d(mode, order)
    if normalized:
        kernel = normalize_kernel2d(kernel)

    b, c, h, w = input.shape

    # Shape (n, 1, 1, kH, kW), detached and matched to the input's
    # device/dtype; flipped so conv3d performs true convolution.
    conv_kernel: torch.Tensor = kernel.to(input).detach()[:, None, None].flip(-3)

    # Pad with "replicate" for spatial dims, but with zeros for channel.
    pad_lrtb = [kernel.size(1) // 2, kernel.size(1) // 2,
                kernel.size(2) // 2, kernel.size(2) // 2]

    n_out: int = 3 if order == 2 else 2
    flat_input = input.reshape(b * c, 1, h, w)
    padded: torch.Tensor = F.pad(flat_input, pad_lrtb, 'replicate')[:, :, None]
    return F.conv3d(padded, conv_kernel, padding=0).view(b, c, n_out, h, w)
def filter2D(input: torch.Tensor, kernel: torch.Tensor,
             border_type: str = 'reflect',
             normalized: bool = False) -> torch.Tensor:
    r"""Function that convolves a tensor with a kernel.

    The function applies a given kernel to a tensor. The kernel is applied
    independently at each depth channel of the tensor. Before applying the
    kernel, the function applies padding according to the specified mode so
    that the output remains in the same shape.

    Args:
        input (torch.Tensor): the input tensor with shape of
          :math:`(B, C, H, W)`.
        kernel (torch.Tensor): the kernel to be convolved with the input
          tensor. The kernel shape must be :math:`(1, kH, kW)`.
        border_type (str): the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``, ``'reflect'``,
          ``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
        normalized (bool): If True, kernel will be L1 normalized.

    Return:
        torch.Tensor: the convolved tensor of same size and numbers of channels
        as the input.
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}"
                        .format(type(input)))

    if not isinstance(kernel, torch.Tensor):
        raise TypeError("Input kernel type is not a torch.Tensor. Got {}"
                        .format(type(kernel)))

    if not isinstance(border_type, str):
        # fix: report the offending argument's type, not the kernel's
        raise TypeError("Input border_type is not string. Got {}"
                        .format(type(border_type)))

    if not len(input.shape) == 4:
        raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                         .format(input.shape))

    if not len(kernel.shape) == 3:
        raise ValueError("Invalid kernel shape, we expect 1xHxW. Got: {}"
                         .format(kernel.shape))

    borders_list: List[str] = ['constant', 'reflect', 'replicate', 'circular']
    if border_type not in borders_list:
        # fix: add the missing space between the sentences
        raise ValueError("Invalid border_type, we expect the following: {0}. "
                         "Got: {1}".format(borders_list, border_type))

    # prepare kernel: (1, kH, kW) -> (1, 1, kH, kW), on the input's device/dtype
    b, c, h, w = input.shape
    tmp_kernel: torch.Tensor = kernel.unsqueeze(0).to(input.device).to(input.dtype)
    if normalized:
        tmp_kernel = normalize_kernel2d(tmp_kernel)

    # pad the input tensor so the output keeps the input's spatial size
    height, width = tmp_kernel.shape[-2:]
    padding_shape: List[int] = compute_padding((height, width))
    input_pad: torch.Tensor = F.pad(input.view(b * c, 1, h, w), padding_shape,
                                    mode=border_type)

    # convolve the tensor with the kernel; channels were folded into the batch
    return F.conv2d(input_pad, tmp_kernel, padding=0, stride=1).view(b, c, h, w)
def __init__(self, order=1, normalized=True):
    """Initialize the sobel spatial gradient module.

    Args:
        order: the order of the derivatives. Default: 1.
        normalized: if True, L1-normalize the kernel. Default: True.
    """
    super(SpatialGradient, self).__init__()
    self.normalized = normalized
    self.order = order
    self.mode = "sobel"
    grad_kernel = get_spatial_gradient_kernel2d(self.mode, order)
    self.kernel = normalize_kernel2d(grad_kernel) if self.normalized else grad_kernel
def spatial_gradient(input: torch.Tensor,
                     mode: str = 'sobel',
                     order: int = 1,
                     normalized: bool = True) -> torch.Tensor:
    r"""Computes the first order image derivative in both x and y using a Sobel
    operator.

    Args:
        input (torch.Tensor): input image tensor with shape
            :math:`(B, C, H, W)`.
        mode (str): derivatives modality, can be: `sobel` or `diff`.
            Default: `sobel`.
        order (int): the order of the derivatives. Default: 1.
        normalized (bool): whether the output is normalized. Default: True.

    Return:
        torch.Tensor: the derivatives of the input feature map. with shape
        :math:`(B, C, 2, H, W)`.

    Examples:
        >>> input = torch.rand(1, 3, 4, 4)
        >>> output = spatial_gradient(input)  # 1x3x2x4x4
        >>> output.shape
        torch.Size([1, 3, 2, 4, 4])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(input)))
    if not len(input.shape) == 4:
        raise ValueError(
            "Invalid input shape, we expect BxCxHxW. Got: {}".format(
                input.shape))

    # build the derivative kernel, optionally L1-normalized
    kernel: torch.Tensor = get_spatial_gradient_kernel2d(mode, order)
    if normalized:
        kernel = normalize_kernel2d(kernel)

    b, c, h, w = input.shape

    # (n, 1, 1, kH, kW) kernel on the input's device/dtype, flipped along the
    # derivative axis so conv3d performs true convolution
    flipped: torch.Tensor = kernel.to(input).detach()[:, None, None].flip(-3)

    # Pad with "replicate" for spatial dims, but with zeros for channel
    pad_amount = [kernel.size(1) // 2, kernel.size(1) // 2,
                  kernel.size(2) // 2, kernel.size(2) // 2]

    derivative_count: int = 3 if order == 2 else 2
    merged = F.pad(input.reshape(b * c, 1, h, w), pad_amount, 'replicate')
    return F.conv3d(merged[:, :, None], flipped,
                    padding=0).view(b, c, derivative_count, h, w)
def __init__(self,
             kernel_size: Tuple[int, int],
             border_type: str = 'reflect',
             normalized: bool = True) -> None:
    """Initialize the box blur module.

    Args:
        kernel_size: the blurring kernel size.
        border_type: the padding mode to be applied before convolving.
        normalized: if True, L1 norm of the kernel is set to 1.
    """
    super(BoxBlur, self).__init__()
    self.kernel_size: Tuple[int, int] = kernel_size
    self.border_type: str = border_type
    self.normalized: bool = normalized
    box_kernel: torch.Tensor = get_box_kernel2d(kernel_size)
    self.kernel: torch.Tensor = (
        normalize_kernel2d(box_kernel) if self.normalized else box_kernel
    )
def __init__(self,
             kernel_size: int,
             border_type: str = 'reflect',
             normalized: bool = True) -> None:
    """Initialize the Laplacian module.

    Args:
        kernel_size: the size of the laplacian kernel.
        border_type: the padding mode to be applied before convolving.
        normalized: if True, L1 norm of the kernel is set to 1.
    """
    super(Laplacian, self).__init__()
    self.kernel_size: int = kernel_size
    self.border_type: str = border_type
    self.normalized: bool = normalized
    # (1, kH, kW) kernel, optionally L1-normalized
    lap_kernel: torch.Tensor = get_laplacian_kernel2d(kernel_size)[None]
    self.kernel: torch.Tensor = (
        normalize_kernel2d(lap_kernel) if self.normalized else lap_kernel
    )
def __init__(self,
             mode: str = 'sobel',
             order: int = 1,
             normalized: bool = True) -> None:
    """Initialize the spatial gradient module.

    Args:
        mode: derivatives modality, ``'sobel'`` or ``'diff'``.
        order: the order of the derivatives.
        normalized: if True, L1-normalize the kernel.
    """
    super(SpatialGradient, self).__init__()
    self.normalized: bool = normalized
    self.order: int = order
    self.mode: str = mode
    grad_kernel = get_spatial_gradient_kernel2d(mode, order)
    self.kernel = normalize_kernel2d(grad_kernel) if self.normalized else grad_kernel
def box_blur(
    input: torch.Tensor, kernel_size: Tuple[int, int],
    border_type: str = 'reflect', normalized: bool = True
) -> torch.Tensor:
    r"""Blur an image using the box filter.

    .. image:: _static/img/box_blur.png

    The function smooths an image using the kernel:

    .. math::
        K = \frac{1}{\text{kernel_size}_x * \text{kernel_size}_y}
        \begin{bmatrix}
            1 & 1 & 1 & \cdots & 1 & 1 \\
            1 & 1 & 1 & \cdots & 1 & 1 \\
            \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\
            1 & 1 & 1 & \cdots & 1 & 1 \\
        \end{bmatrix}

    Args:
        image: the image to blur with shape :math:`(B,C,H,W)`.
        kernel_size: the blurring kernel size.
        border_type: the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``, ``'reflect'``,
          ``'replicate'`` or ``'circular'``.
        normalized: if True, L1 norm of the kernel is set to 1.

    Returns:
        the blurred tensor with shape :math:`(B,C,H,W)`.

    .. note::
       See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
       filtering_operators.html>`__.

    Example:
        >>> input = torch.rand(2, 4, 5, 7)
        >>> output = box_blur(input, (3, 3))  # 2x4x5x7
        >>> output.shape
        torch.Size([2, 4, 5, 7])
    """
    # build the box kernel, optionally L1-normalized, then filter
    box_kernel: torch.Tensor = get_box_kernel2d(kernel_size)
    if normalized:
        box_kernel = normalize_kernel2d(box_kernel)
    return kornia.filter2d(input, box_kernel, border_type)
def box_blur(input: torch.Tensor,
             kernel_size: Tuple[int, int],
             border_type: str = 'reflect',
             normalized: bool = True) -> torch.Tensor:
    r"""Blurs an image using the box filter.

    The function smooths an image using the kernel:

    .. math::
        K = \frac{1}{\text{kernel_size}_x * \text{kernel_size}_y}
        \begin{bmatrix}
            1 & 1 & 1 & \cdots & 1 & 1 \\
            1 & 1 & 1 & \cdots & 1 & 1 \\
            \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\
            1 & 1 & 1 & \cdots & 1 & 1 \\
        \end{bmatrix}

    Args:
        image (torch.Tensor): the image to blur with shape :math:`(B,C,H,W)`.
        kernel_size (Tuple[int, int]): the blurring kernel size.
        border_type (str): the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``, ``'reflect'``,
          ``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
        normalized (bool): if True, L1 norm of the kernel is set to 1.

    Returns:
        torch.Tensor: the blurred tensor with shape :math:`(B,C,H,W)`.

    Example:
        >>> input = torch.rand(2, 4, 5, 7)
        >>> output = box_blur(input, (3, 3))  # 2x4x5x7
        >>> output.shape
        torch.Size([2, 4, 5, 7])
    """
    # delegate the convolution after building the (optionally normalized) kernel
    blur_kernel: torch.Tensor = get_box_kernel2d(kernel_size)
    if normalized:
        blur_kernel = normalize_kernel2d(blur_kernel)
    return kornia.filter2d(input, blur_kernel, border_type)
def laplacian(
    input: torch.Tensor, kernel_size: int,
    border_type: str = 'reflect', normalized: bool = True
) -> torch.Tensor:
    r"""Create an operator that returns a tensor using a Laplacian filter.

    .. image:: _static/img/laplacian.png

    The operator smooths the given tensor with a laplacian kernel by
    convolving it to each channel. It supports batched operation.

    Args:
        input: the input image tensor with shape :math:`(B, C, H, W)`.
        kernel_size: the size of the kernel.
        border_type: the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``, ``'reflect'``,
          ``'replicate'`` or ``'circular'``.
        normalized: if True, L1 norm of the kernel is set to 1.

    Return:
        the blurred image with shape :math:`(B, C, H, W)`.

    .. note::
       See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
       filtering_edges.html>`__.

    Examples:
        >>> input = torch.rand(2, 4, 5, 5)
        >>> output = laplacian(input, 3)
        >>> output.shape
        torch.Size([2, 4, 5, 5])
    """
    # (1, kH, kW) laplacian kernel, optionally L1-normalized
    lap_kernel: torch.Tensor = get_laplacian_kernel2d(kernel_size)[None]
    if normalized:
        lap_kernel = normalize_kernel2d(lap_kernel)
    return kornia.filter2d(input, lap_kernel, border_type)
def filter2d(input: torch.Tensor,
             kernel: torch.Tensor,
             border_type: str = 'reflect',
             normalized: bool = False,
             padding: str = 'same') -> torch.Tensor:
    r"""Convolve a tensor with a 2d kernel.

    The function applies a given kernel to a tensor. The kernel is applied
    independently at each depth channel of the tensor. Before applying the
    kernel, the function applies padding according to the specified mode so
    that the output remains in the same shape.

    Args:
        input: the input tensor with shape of
          :math:`(B, C, H, W)`.
        kernel: the kernel to be convolved with the input
          tensor. The kernel shape must be :math:`(1, kH, kW)` or :math:`(B, kH, kW)`.
        border_type: the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``,
          ``'reflect'``, ``'replicate'`` or ``'circular'``.
        normalized: If True, kernel will be L1 normalized.
        padding: This defines the type of padding.
          2 modes available ``'same'`` or ``'valid'``.

    Return:
        torch.Tensor: the convolved tensor of same size and numbers of channels
        as the input with shape :math:`(B, C, H, W)`.

    Example:
        >>> input = torch.tensor([[[
        ...    [0., 0., 0., 0., 0.],
        ...    [0., 0., 0., 0., 0.],
        ...    [0., 0., 5., 0., 0.],
        ...    [0., 0., 0., 0., 0.],
        ...    [0., 0., 0., 0., 0.],]]])
        >>> kernel = torch.ones(1, 3, 3)
        >>> filter2d(input, kernel, padding='same')
        tensor([[[[0., 0., 0., 0., 0.],
                  [0., 5., 5., 5., 0.],
                  [0., 5., 5., 5., 0.],
                  [0., 5., 5., 5., 0.],
                  [0., 0., 0., 0., 0.]]]])
    """
    if not isinstance(input, torch.Tensor):
        # fix: the message wrongly blamed `border_type`
        raise TypeError(
            f"Input type is not a torch.Tensor. Got {type(input)}")

    if not isinstance(kernel, torch.Tensor):
        # fix: the message wrongly blamed `border_type`
        raise TypeError(
            f"Input kernel type is not a torch.Tensor. Got {type(kernel)}")

    if not isinstance(border_type, str):
        raise TypeError(
            f"Input border_type is not string. Got {type(border_type)}")

    if border_type not in ['constant', 'reflect', 'replicate', 'circular']:
        raise ValueError(f"Invalid border type, we expect 'constant', "
                         f"'reflect', 'replicate', 'circular'. Got: {border_type}")

    if not isinstance(padding, str):
        raise TypeError(f"Input padding is not string. Got {type(padding)}")

    if padding not in ['valid', 'same']:
        raise ValueError(
            f"Invalid padding mode, we expect 'valid' or 'same'. Got: {padding}"
        )

    if not len(input.shape) == 4:
        raise ValueError(
            f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")

    # fix: the old `not len(...) == 3 and kernel.shape[0] != 1` check let
    # rank-4 kernels with a leading singleton slip through; the kernel must
    # be rank 3 (1xHxW or BxHxW).
    if not len(kernel.shape) == 3:
        raise ValueError(
            f"Invalid kernel shape, we expect 1xHxW. Got: {kernel.shape}")

    # prepare kernel: (B, kH, kW) -> (B, 1, kH, kW) on the input device/dtype
    b, c, h, w = input.shape
    tmp_kernel: torch.Tensor = kernel.unsqueeze(1).to(input)

    if normalized:
        tmp_kernel = normalize_kernel2d(tmp_kernel)

    tmp_kernel = tmp_kernel.expand(-1, c, -1, -1)

    height, width = tmp_kernel.shape[-2:]

    # pad the input tensor only in 'same' mode so the spatial size is kept
    if padding == 'same':
        padding_shape: List[int] = _compute_padding([height, width])
        input = F.pad(input, padding_shape, mode=border_type)

    # kernel and input tensor reshape to align element-wise or batch-wise params
    tmp_kernel = tmp_kernel.reshape(-1, 1, height, width)
    input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1))

    # convolve the tensor with the kernel; groups keeps channels independent
    output = F.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0),
                      padding=0, stride=1)

    if padding == 'same':
        out = output.view(b, c, h, w)
    else:
        # 'valid': the output shrinks by kernel_size - 1 in each spatial dim
        out = output.view(b, c, h - height + 1, w - width + 1)

    return out
def filter3d(input: torch.Tensor,
             kernel: torch.Tensor,
             border_type: str = 'replicate',
             normalized: bool = False) -> torch.Tensor:
    r"""Convolve a tensor with a 3d kernel.

    The function applies a given kernel to a tensor. The kernel is applied
    independently at each depth channel of the tensor. Before applying the
    kernel, the function applies padding according to the specified mode so
    that the output remains in the same shape.

    Args:
        input: the input tensor with shape of
          :math:`(B, C, D, H, W)`.
        kernel: the kernel to be convolved with the input
          tensor. The kernel shape must be :math:`(1, kD, kH, kW)` or
          :math:`(B, kD, kH, kW)`.
        border_type: the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``,
          ``'replicate'`` or ``'circular'``.
        normalized: If True, kernel will be L1 normalized.

    Return:
        the convolved tensor of same size and numbers of channels
        as the input with shape :math:`(B, C, D, H, W)`.

    Example:
        >>> input = torch.tensor([[[
        ...    [[0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.]],
        ...    [[0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.],
        ...     [0., 0., 5., 0., 0.],
        ...     [0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.]],
        ...    [[0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.],
        ...     [0., 0., 0., 0., 0.]]
        ... ]]])
        >>> kernel = torch.ones(1, 3, 3, 3)
        >>> filter3d(input, kernel)
        tensor([[[[[0., 0., 0., 0., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 0., 0., 0., 0.]],
        <BLANKLINE>
                  [[0., 0., 0., 0., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 0., 0., 0., 0.]],
        <BLANKLINE>
                  [[0., 0., 0., 0., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 5., 5., 5., 0.],
                   [0., 0., 0., 0., 0.]]]]])
    """
    if not isinstance(input, torch.Tensor):
        # fix: the message wrongly blamed `border_type`
        raise TypeError(
            f"Input type is not a torch.Tensor. Got {type(input)}")

    if not isinstance(kernel, torch.Tensor):
        # fix: the message wrongly blamed `border_type`
        raise TypeError(
            f"Input kernel type is not a torch.Tensor. Got {type(kernel)}")

    if not isinstance(border_type, str):
        # fix: report type(border_type), not type(kernel)
        raise TypeError(
            f"Input border_type is not string. Got {type(border_type)}")

    if not len(input.shape) == 5:
        raise ValueError(
            f"Invalid input shape, we expect BxCxDxHxW. Got: {input.shape}")

    # fix: the old `not len(...) == 4 and kernel.shape[0] != 1` check let
    # non-rank-4 kernels with a leading singleton slip through; the kernel
    # must be rank 4 (1xDxHxW or BxDxHxW).
    if not len(kernel.shape) == 4:
        raise ValueError(
            f"Invalid kernel shape, we expect 1xDxHxW. Got: {kernel.shape}")

    # prepare kernel: (B, kD, kH, kW) -> (B, 1, kD, kH, kW) on input device/dtype
    b, c, d, h, w = input.shape
    tmp_kernel: torch.Tensor = kernel.unsqueeze(1).to(input)

    if normalized:
        # normalize_kernel2d works on the last two dims, so fold D into H*W
        bk, dk, hk, wk = kernel.shape
        tmp_kernel = normalize_kernel2d(tmp_kernel.view(
            bk, dk, hk * wk)).view_as(tmp_kernel)

    tmp_kernel = tmp_kernel.expand(-1, c, -1, -1, -1)

    # pad the input tensor so the output keeps the input's spatial size
    depth, height, width = tmp_kernel.shape[-3:]
    padding_shape: List[int] = _compute_padding([depth, height, width])
    input_pad: torch.Tensor = F.pad(input, padding_shape, mode=border_type)

    # kernel and input tensor reshape to align element-wise or batch-wise params
    tmp_kernel = tmp_kernel.reshape(-1, 1, depth, height, width)
    input_pad = input_pad.view(-1, tmp_kernel.size(0), input_pad.size(-3),
                               input_pad.size(-2), input_pad.size(-1))

    # convolve the tensor with the kernel; groups keeps channels independent
    output = F.conv3d(input_pad, tmp_kernel, groups=tmp_kernel.size(0),
                      padding=0, stride=1)

    return output.view(b, c, d, h, w)
def filter2D(input: torch.Tensor, kernel: torch.Tensor,
             border_type: str = 'reflect',
             normalized: bool = False) -> torch.Tensor:
    r"""Function that convolves a tensor with a 2d kernel.

    The function applies a given kernel to a tensor. The kernel is applied
    independently at each depth channel of the tensor. Before applying the
    kernel, the function applies padding according to the specified mode so
    that the output remains in the same shape.

    Args:
        input (torch.Tensor): the input tensor with shape of
          :math:`(B, C, H, W)`.
        kernel (torch.Tensor): the kernel to be convolved with the input
          tensor. The kernel shape must be :math:`(1, kH, kW)`.
        border_type (str): the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``, ``'reflect'``,
          ``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
        normalized (bool): If True, kernel will be L1 normalized.

    Return:
        torch.Tensor: the convolved tensor of same size and numbers of channels
        as the input with shape :math:`(B, C, H, W)`.

    Example:
        >>> input = torch.tensor([[[
        ...    [0., 0., 0., 0., 0.],
        ...    [0., 0., 0., 0., 0.],
        ...    [0., 0., 5., 0., 0.],
        ...    [0., 0., 0., 0., 0.],
        ...    [0., 0., 0., 0., 0.],]]])
        >>> kernel = torch.ones(1, 3, 3)
        >>> kornia.filter2D(input, kernel)
        tensor([[[[0., 0., 0., 0., 0.],
                  [0., 5., 5., 5., 0.],
                  [0., 5., 5., 5., 0.],
                  [0., 5., 5., 5., 0.],
                  [0., 0., 0., 0., 0.]]]])
    """
    testing.check_is_tensor(input)
    testing.check_is_tensor(kernel)

    if not isinstance(border_type, str):
        # fix: report the offending argument's type, not the kernel's
        raise TypeError("Input border_type is not string. Got {}"
                        .format(type(border_type)))

    if not len(input.shape) == 4:
        raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                         .format(input.shape))

    # fix: the old `not len(...) == 3 and kernel.shape[0] != 1` check let
    # rank-4 kernels with a leading singleton slip through; the kernel must
    # be rank 3 (1xHxW).
    if not len(kernel.shape) == 3:
        raise ValueError("Invalid kernel shape, we expect 1xHxW. Got: {}"
                         .format(kernel.shape))

    # prepare kernel: (1, kH, kW) -> (1, 1, kH, kW) matching the input dtype
    b, c, h, w = input.shape
    tmp_kernel: torch.Tensor = kernel[None].type_as(input)
    if normalized:
        tmp_kernel = normalize_kernel2d(tmp_kernel)

    # pad the input tensor so the output keeps the input's spatial size
    height, width = tmp_kernel.shape[-2:]
    padding_shape: List[int] = compute_padding([height, width])
    input_pad: torch.Tensor = F.pad(input, padding_shape, mode=border_type)

    # grouped convolution: the expanded kernel filters each channel independently
    return F.conv2d(input_pad, tmp_kernel.expand(c, -1, -1, -1),
                    groups=c, padding=0, stride=1)