Example #1
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              bias=True,
              pre_permuted_filters=True):
     # stride, dilation and groups !=1 functionality not tested
     stride = 1
     dilation = 1
     groups = 1
     # zero padding is added automatically in conv4d function to preserve tensor size
     padding = 0
     kernel_size = _quadruple(kernel_size)
     stride = _quadruple(stride)
     padding = _quadruple(padding)
     dilation = _quadruple(dilation)
     super(Conv4d, self).__init__(in_channels, out_channels, kernel_size,
                                  stride, padding, dilation, False,
                                  _quadruple(0), groups, bias)
     # weights will be sliced along one dimension during convolution loop
     # make the looping dimension to be the first one in the tensor,
     # so that we don't need to call contiguous() inside the loop
     self.pre_permuted_filters = pre_permuted_filters
     if self.pre_permuted_filters:
         self.weight.data = self.weight.data.permute(2, 0, 1, 3, 4,
                                                     5).contiguous()
     self.use_half = False
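Every example on this page revolves around torch.nn.modules.utils._quadruple, which repeats a scalar four times (an iterable of length four passes through unchanged). A quick demonstration (note: _quadruple is an internal PyTorch helper, so its import path may change between releases):

from torch.nn.modules.utils import _pair, _quadruple

print(_quadruple(1))             # (1, 1, 1, 1)
print(_quadruple((1, 2, 3, 4)))  # (1, 2, 3, 4)
print(_pair(3))                  # (3, 3)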
Example #2
    def __init__(self, dx, dy, kernel_size=3):
        """Constructor method
        """
        super(Grad1Filter2d, self).__init__()
        self.dx = dx
        self.dy = dy

        # smoothed central finite diff
        # https://en.wikipedia.org/wiki/Finite_difference_coefficient
        WEIGHT_H_3x3 = torch.FloatTensor(
            [[[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]]]) / 8.

        # larger kernel size tends to smooth things out
        WEIGHT_H_5x5 = torch.FloatTensor(
            [[[[1, -8, 0, 8, -1], [2, -16, 0, 16, -2], [3, -24, 0, 24, -3],
               [2, -16, 0, 16, -2], [1, -8, 0, 8, -1]]]]) / (9 * 12.)

        if kernel_size == 3:
            self.register_buffer("weight_h", WEIGHT_H_3x3)
            self.register_buffer("weight_v", WEIGHT_H_3x3.transpose(-1, -2))
            self.padding = _quadruple(1)

        elif kernel_size == 5:
            self.register_buffer("weight_h", WEIGHT_H_5x5)
            self.register_buffer("weight_v", WEIGHT_H_5x5.transpose(-1, -2))
            self.padding = _quadruple(2)
        else:
            raise ValueError(
                'kernel_size {:d} is not supported!'.format(kernel_size))
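The constructor above only registers the finite-difference stencils; a plausible forward pass would pad, convolve, and normalize by the grid spacing (a sketch assuming single-channel input, not the repo's actual code):

import torch.nn.functional as F

def forward(self, u):
    # u: (B, 1, H, W); replicate padding preserves the spatial size
    u = F.pad(u, self.padding, mode='replicate')
    du_dx = F.conv2d(u, self.weight_h) / self.dx
    du_dy = F.conv2d(u, self.weight_v) / self.dy
    return du_dx, du_dy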
Example #3
    def __init__(self, dx, dy, kernel_size=3):
        """Constructor method
        """
        super(Grad2Filter2d, self).__init__()
        self.dx = dx
        self.dy = dy
        # smoothed central finite diff
        WEIGHT_H_3x3 = torch.FloatTensor(
            [[[[1, -2, 1], [2, -4, 2], [1, -2, 1]]]]) / 4.

        # larger kernel size tends to smooth things out
        # symmetric [1, 2, 3, 2, 1]-smoothed second-derivative stencil
        WEIGHT_H_5x5 = torch.FloatTensor(
            [[[[-1, 16, -30, 16, -1], [-2, 32, -60, 32, -2],
               [-3, 48, -90, 48, -3], [-2, 32, -60, 32, -2],
               [-1, 16, -30, 16, -1]]]]) / (9 * 12.)

        if kernel_size == 3:
            self.register_buffer("weight_h", WEIGHT_H_3x3)
            self.register_buffer("weight_v", WEIGHT_H_3x3.transpose(-1, -2))
            self.register_buffer("weight_laplace",
                                 WEIGHT_H_3x3 + WEIGHT_H_3x3.transpose(-1, -2))
            self.padding = _quadruple(1)
        elif kernel_size == 5:
            self.register_buffer("weight_h", WEIGHT_H_5x5)
            self.register_buffer("weight_v", WEIGHT_H_5x5.transpose(-1, -2))
            self.register_buffer("weight_laplace",
                                 WEIGHT_H_5x5 + WEIGHT_H_5x5.transpose(-1, -2))
            self.padding = _quadruple(2)
        else:
            raise ValueError(
                'kernel_size {:d} is not supported!'.format(kernel_size))
Example #4
    def forward(self, inputs):
        if not self.force_fp:
            weight = self.quant_weight(self.weight)
            if self.padding_after_quant:
                inputs = self.quant_activation(inputs)
                inputs = F.pad(inputs, _quadruple(self.pads), 'constant', 0)
            else:  # pad first so the zeros are quantized into a valid level (e.g., BNNs only have -1 and +1)
                inputs = F.pad(inputs, _quadruple(self.pads), 'constant', 0)
                inputs = self.quant_activation(inputs)
        else:
            weight = self.weight

        output = F.conv2d(inputs, weight, self.bias, self.stride, self.padding,
                          self.dilation, self.groups)
        return output
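Why the branch order matters: with a binary quantizer the zero padding lies outside the quantized alphabet {-1, +1}, so padding first lets the quantizer map the border into a valid level. A small demonstration (the sign-based quantizer here is a toy stand-in, not the module's actual quant_activation):

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 2, 2)
quant = lambda t: torch.where(t >= 0, torch.ones_like(t), -torch.ones_like(t))

a = F.pad(quant(x), (1, 1, 1, 1))  # quantize, then pad: border stays 0
b = quant(F.pad(x, (1, 1, 1, 1)))  # pad, then quantize: border becomes +1
print(a[0, 0, 0, 0].item(), b[0, 0, 0, 0].item())  # 0.0 vs 1.0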
Example #5
 def __init__(self, kernel_size=3, stride=1, padding=0, same=True):
     """Initialize with kernel_size, stride, padding."""
     super().__init__()
     self.k = _pair(kernel_size)
     self.stride = _pair(stride)
     self.padding = _quadruple(padding)  # convert to l, r, t, b
     self.same = same
Example #6
    def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
        super(MedianPool2d, self).__init__()

        self.k = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _quadruple(padding)  # convert to l, r, t, b
        self.same = same
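For reference, the forward pass these median-pool constructors are usually paired with extracts k x k patches with unfold and takes their median (a sketch of the widely circulated recipe; a _padding helper computing the 'same' pads is assumed to exist on the module):

import torch.nn.functional as F

def forward(self, x):
    x = F.pad(x, self._padding(x), mode='reflect')
    x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
    # flatten each k x k window and take its median
    x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
    return x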
Example #7
    def grad_h(self, image, filter_size=3):
        """Get image gradient along horizontal direction, or x axis.
        Option to do replicate padding for image before convolution. This is mainly
        for estimate the du/dy, enforcing Neumann boundary condition.

        Args:
            image (Tensor): (1, 1, H, W)
            replicate_pad (None, int, 4-tuple): if 4-tuple, (padLeft, padRight, padTop, 
                padBottom)
        """
        image_width = image.shape[-1]

        if filter_size == 3:
            replicate_pad = 1
            kernel = self.VSOBEL_WEIGHTS_3x3
        elif filter_size == 5:
            replicate_pad = 2
            kernel = self.VSOBEL_WEIGHTS_5x5
        else:
            raise ValueError('filter_size {} is not supported!'.format(filter_size))
        image = F.pad(image, _quadruple(replicate_pad), mode='replicate')
        grad = F.conv2d(image, kernel, stride=1, padding=0,
                        bias=None) * image_width
        # modify the boundary based on forward & backward finite difference (three points)
        # forward [-3, 4, -1], backward [3, -4, 1]
        if self.correct:
            return torch.matmul(grad, self.modifier)
        else:
            return grad
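The boundary fix mentioned in the comment uses the one-sided three-point stencil f'(x0) ≈ (-3*f0 + 4*f1 - f2) / (2h), which is exact for quadratics. A quick sanity check (my own verification, not code from the repo):

import torch

h = 0.1
f = torch.tensor([0.0, h, 2 * h]) ** 2  # f(x) = x^2 sampled at 0, h, 2h
fwd = (-3 * f[0] + 4 * f[1] - f[2]) / (2 * h)
print(fwd.item())  # 0.0 == f'(0)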
Example #8
    def __init__(
        self,
        filter_type=[1, 3, 3, 1],
        stride=1,
        padding=1,
        factor=1,
        direction="vh",
        ring=True,
    ):
        super().__init__()
        self.filter_type = filter_type
        self.stride = stride
        self.padding = _quadruple(padding)
        self.factor = factor
        self.direction = direction

        self.pad = Pad(
            padding=self.padding,
            horizontal="circular" if ring else "reflect",
            vertical="reflect",
        )

        kernel = torch.tensor(self.filter_type, dtype=torch.float32)
        if direction == "vh":
            kernel = torch.outer(kernel, kernel)
        elif direction == "v":
            kernel = kernel[:, None]
        elif direction == "h":
            kernel = kernel[None, :]
        else:
            raise ValueError('direction must be "vh", "v", or "h", got {!r}'.format(direction))
        kernel /= kernel.sum()
        if factor > 1:
            kernel *= factor ** 2
        self.register_buffer("kernel", kernel[None, None])
Example #9
    def forward(self, x):
        # zero-pad when the spatial size does not divide the stride
        # (only H is checked; W is assumed to need the same padding)
        B, C, H, W = x.shape
        if H % self.stride != 0:
            pad = (self.stride - (H % self.stride)) // 2
            x = F.pad(x, _quadruple(pad), mode="constant", value=0)

        B, C, H, W = x.shape
        # space-to-depth via reshapes; consider F.pixel_unshuffle instead (see the sketch below)
        x = x.reshape(B, C, H // self.stride, self.stride, W // self.stride,
                      self.stride)
        x = x.transpose(4,
                        3).reshape(B, C, 1, H // self.stride, W // self.stride,
                                   self.stride * self.stride)
        x = x.transpose(2, 5).reshape(B, C * self.stride * self.stride,
                                      H // self.stride, W // self.stride)
        x = self.conv(x)
        if self.bn_before_restore:
            x = self.bn(x)

        if self.keepdim:  # depth-to-space back to full resolution (the hard-coded 4 and 2 assume stride == 2)
            B, C, H, W = x.shape
            x = x.reshape(B, C // 4, 4, H, W, 1)
            x = x.transpose(2, 5).reshape(B, C // 4, H, W, 2, 2)
            x = x.transpose(4, 3).reshape(B, C // 4, H * 2, W * 2)
        return x
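The reshape/transpose chain above is exactly the space-to-depth operation; since PyTorch 1.8 the same result comes from F.pixel_unshuffle (and the keepdim branch is its inverse, F.pixel_shuffle, with the factor hard-coded to 2). A quick equivalence check, assuming the stride divides both H and W:

import torch
import torch.nn.functional as F

x, s = torch.randn(2, 3, 8, 8), 2
a = F.pixel_unshuffle(x, s)  # (2, 12, 4, 4)
b = x.reshape(2, 3, 4, s, 4, s).transpose(4, 3)
b = b.reshape(2, 3, 1, 4, 4, s * s).transpose(2, 5)
b = b.reshape(2, 3 * s * s, 4, 4)
print(torch.equal(a, b))  # True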
Example #10
    def __init__(self, cvae_small, cvae_input_sz=16, stride=1, padding=0, same=True, img_size=64):
        super(ConvVAE2d, self).__init__()
        self.cvae_small = cvae_small

        self.k = _pair(cvae_input_sz)
        self.stride = _pair(stride)
        self.padding = _quadruple(padding)  # convert to l, r, t, b
        self.same = same
        self.fold = nn.Fold(output_size=(img_size, img_size),
                            kernel_size=self.k, stride=self.stride)
        # note: this overrides the _quadruple padding assigned above
        self.padding = self._padding(size=(img_size, img_size))
Example #11
    def __init__(self, dx, kernel_size=3, device='cpu'):
        self.dx = dx
        # smoothed central finite diff
        WEIGHT_H_3x3 = torch.FloatTensor(
            [[[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]]]).to(device) / 8.

        # larger kernel size tends to smooth things out
        WEIGHT_H_5x5 = torch.FloatTensor([
            [[[1, -8, 0, 8, -1], [2, -16, 0, 16, -2], [3, -24, 0, 24, -3],
              [2, -16, 0, 16, -2], [1, -8, 0, 8, -1]]]
        ]).to(device) / (9 * 12.)
        if kernel_size == 3:
            self.weight_h = WEIGHT_H_3x3
            self.weight_v = WEIGHT_H_3x3.transpose(-1, -2)
            self.weight = torch.cat((self.weight_h, self.weight_v), 0)
            self.padding = _quadruple(1)
        elif kernel_size == 5:
            self.weight_h = WEIGHT_H_5x5
            self.weight_v = WEIGHT_H_5x5.transpose(-1, -2)
            self.weight = torch.cat((self.weight_h, self.weight_v), 0)
            self.padding = _quadruple(2)
        else:
            raise ValueError('kernel_size {:d} is not supported!'.format(kernel_size))
Example #12
 def __init__(self,
              padding: _size_4_t,
              lon_dim: int,
              lat_mode: str = 'constant',
              value: float = 0.) -> None:
     super(LatLonPad, self).__init__()
     assert lat_mode in ['constant', 'circular']
     self.padding = _quadruple(padding)
     self.lon_dim = lon_dim
     self.lat_mode = lat_mode
     self.value = value
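A sketch of the forward pass this constructor implies, assuming longitude runs along the last dimension: wrap circularly around the globe in longitude, and pad latitude (the poles) with a constant or circularly, per lat_mode (assumed code, not from the repo):

import torch.nn.functional as F

def forward(self, x):
    # x: (B, C, lat, lon); padding is (left, right, top, bottom)
    left, right, top, bottom = self.padding
    x = F.pad(x, (left, right, 0, 0), mode='circular')  # wrap longitude
    if self.lat_mode == 'constant':
        x = F.pad(x, (0, 0, top, bottom), mode='constant', value=self.value)
    else:
        x = F.pad(x, (0, 0, top, bottom), mode='circular')
    return x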
Example #13
 def __init__(self, kernel_size=3, stride=1):
     super(MedianSmoothing2D, self).__init__()
     self.kernel_size = kernel_size
     self.stride = stride
     padding = int(kernel_size) // 2
     if _is_even(kernel_size):
         # both ways of padding should be fine here
         # self.padding = (padding, 0, padding, 0)
         self.padding = (0, padding, 0, padding)
     else:
         self.padding = _quadruple(padding)
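The even-kernel branch works because a size-preserving pad for kernel k at stride 1 needs k - 1 total pixels per axis, which is odd when k is even and so must be split unevenly. For k = 2 that means one pixel on a single side (a quick check, using avg_pool2d as a stand-in for the median operation):

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4)
y = F.pad(x, (0, 1, 0, 1))  # pad right/bottom only
print(F.avg_pool2d(y, 2, stride=1).shape)  # torch.Size([1, 1, 4, 4])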
Example #14
    def forward(self, x):
        """Forward pass.

        :param x:  [B, in_features, H, W] input feature tensor
        :type x: torch.Tensor
        :return: 
            - out: [B, out_features, H, W] output feature tensor
        :rtype: torch.Tensor
        """
        x = self.conv(F.pad(x, _quadruple(1), mode='replicate'))
        return x * torch.exp(
            torch.clamp(self.scale, -4., np.log(4)) * self.logscale_factor)
Example #15
 def __call__(self, image):
     # image: (B, C, H, W)
     padding = (self.weights.shape[-1] - 1) // 2
     image = F.pad(image, _quadruple(padding), mode='reflect')
     channels = image.shape[1]
     weights = self.weights.repeat(channels, 1, 1, 1)
     return F.conv2d(image,
                     weights,
                     bias=None,
                     stride=1,
                     padding=0,
                     groups=channels)
Example #16
  def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
    super(VariancePool2d, self).__init__()
    self.kernel_size = kernel_size
    self.stride = stride
    self.padding = padding
    self.same = same

    self._k = _pair(kernel_size)
    self._s = _pair(stride)
    self._p = _quadruple(padding)  # convert to l, r, t, b

    self.pool = nn.AvgPool2d(kernel_size=self.kernel_size, stride=self.stride)
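The AvgPool2d member suggests the standard identity Var[x] = E[x^2] - (E[x])^2 computed per pooling window; a plausible forward (a sketch, not the repo's actual code):

def forward(self, x):
    mean = self.pool(x)
    # Var[x] = E[x^2] - (E[x])^2, per pooling window
    return self.pool(x * x) - mean * mean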
Example #17
    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be padded.

        Returns:
            PIL Image: Padded image.
        """
        if isinstance(img, torch.Tensor):
            paddings = _quadruple(get_padding(img, self.max_w, self.max_h))
            return img_tensor_pad(img, paddings, self.padding_mode, self.fill)
        return img_pad(img, get_padding(img, self.max_w, self.max_h),
                       self.fill, self.padding_mode)
Example #18
def generate_spatial_descriptor(data, kernel_size):
    '''
    Applies self local similarity with a fixed sliding window.
    Args:
        data: feature map, variable of shape (b,c,h,w)
        kernel_size: width/height of the local window, int

    Returns:
        output: global spatial map, variable of shape (b,k^2,h,w)
    '''

    padding = int(kernel_size // 2)  # floor division, e.g. 5 // 2 == 2
    b, c, h, w = data.shape
    p2d = _quadruple(padding)  #(pad_l,pad_r,pad_t,pad_b)
    data_padded = Func.pad(data, p2d, 'constant', 0)  #output variable
    assert data_padded.shape == (b, c, (h + 2 * padding), (
        w + 2 * padding)), 'Error: data_padded shape{} wrong!'.format(
            data_padded.shape)

    output = Variable(torch.zeros(b, kernel_size * kernel_size, h, w),
                      requires_grad=data.requires_grad)
    if data.is_cuda:
        output = output.cuda(data.get_device())

    for hi in range(h):
        for wj in range(w):
            q = data[:, :, hi, wj].contiguous()  #(b,c)
            i = hi + padding  #h index in datapadded
            j = wj + padding  #w index in datapadded

            hs = i - padding
            he = i + padding + 1
            ws = j - padding
            we = j + padding + 1
            patch = data_padded[:, :, hs:he, ws:we].contiguous()  #(b,c,k,k)
            assert (patch.shape == (b, c, kernel_size, kernel_size))
            hk, wk = kernel_size, kernel_size

            # reshape features for matrix multiplication
            feature_a = q.view(b, c, 1 * 1).transpose(1, 2)  #(b,1,c), not contiguous
            feature_b = patch.view(b, c, hk * wk)  #(b,c,L)

            # perform matrix mult.
            feature_mul = torch.bmm(feature_a, feature_b)  #(b,1,L)
            assert (feature_mul.shape == (b, 1, hk * wk))
            # flatten to (b, L) before writing into the output map
            correlation_tensor = feature_mul.squeeze(1)  #(b,L)
            output[:, :, hi, wj] = correlation_tensor

    return output
Example #19
    def __init__(self, dx, kernel_size=3, device='cpu'):
        super().__init__()
        self.dx = dx
        # unsmoothed 5-point Laplacian stencil (kept for reference, overwritten below)
        WEIGHT_3x3 = torch.FloatTensor([[[[0, 1, 0], [1, -4, 1],
                                          [0, 1, 0]]]]).to(device)
        # smoothed version, symmetrized below
        WEIGHT_3x3 = torch.FloatTensor(
            [[[[1, 2, 1], [-2, -4, -2], [1, 2, 1]]]]).to(device) / 4.

        WEIGHT_3x3 = WEIGHT_3x3 + torch.transpose(WEIGHT_3x3, -2, -1)

        WEIGHT_5x5 = torch.FloatTensor(
            [[[[0, 0, -1, 0, 0], [0, 0, 16, 0, 0], [-1, 16, -60, 16, -1],
               [0, 0, 16, 0, 0], [0, 0, -1, 0, 0]]]]).to(device) / 12.
        if kernel_size == 3:
            self.padding = _quadruple(1)
            self.weight = WEIGHT_3x3
        elif kernel_size == 5:
            self.padding = _quadruple(2)
            self.weight = WEIGHT_5x5
        else:
            raise ValueError('kernel_size {:d} is not supported!'.format(kernel_size))
Example #20
 def grad_h(self, image, filter_size=5):
     # horizontal derivative
     image_width = image.shape[-1]
     if filter_size == 3:
         replicate_pad = 1
         kernel = self.kernel_h_3x3
     elif filter_size == 5:
         replicate_pad = 2
         kernel = self.kernel_h_5x5
     elif filter_size == 7:
         replicate_pad = 3
         kernel = self.kernel_h_7x7
     else:
         raise ValueError('filter_size {} is not supported!'.format(filter_size))
     image = F.pad(image, _quadruple(replicate_pad), mode='replicate')
     return F.conv2d(image, kernel, stride=1, padding=0,
                     bias=None) * image_width
Example #21
 def grad_v(self, image, filter_size=5):
     # vertical derivative
     image_height = image.shape[-2]
     if filter_size == 3:
         replicate_pad = 1
         kernel = self.kernel_h_3x3.transpose(-1, -2)
     elif filter_size == 5:
         replicate_pad = 2
         kernel = self.kernel_h_5x5.transpose(-1, -2)
     elif filter_size == 7:
         replicate_pad = 3
         kernel = self.kernel_h_7x7.transpose(-1, -2)
     else:
         raise ValueError('filter_size {} is not supported!'.format(filter_size))
     image = F.pad(image, _quadruple(replicate_pad), mode='replicate')
     return F.conv2d(image, kernel, stride=1, padding=0,
                     bias=None) * image_height
Example #22
 def grad_v(self, image, filter_size=3):
     image_height = image.shape[-2]
     if filter_size == 3:
         replicate_pad = 1
         kernel = self.HSOBEL_WEIGHTS_3x3
     elif filter_size == 5:
         replicate_pad = 2
         kernel = self.HSOBEL_WEIGHTS_5x5
     else:
         raise ValueError('filter_size {} is not supported!'.format(filter_size))
     image = F.pad(image, _quadruple(replicate_pad), mode='replicate')
     grad = F.conv2d(image, kernel, stride=1, padding=0,
                     bias=None) * image_height
     # modify the boundary based on forward & backward finite difference
     if self.correct:
         return torch.matmul(self.modifier.t(), grad)
     else:
         return grad
Example #23
def global_spatial_representation_efficient(data, kernel_size):
    '''
    2019-04-27 Applies self local similarity with a fixed sliding window. Efficient version.
    Args:
        data: feature map, variable of shape (b,c,h,w)
        kernel_size: width/height of the local window, int
    Returns:
        output: global spatial map, variable of shape (b,k^2,h,w)
    '''

    padding = int(kernel_size // 2)  # floor division, e.g. 5 // 2 == 2
    b, c, h, w = data.shape
    p2d = _quadruple(padding)  #(pad_l,pad_r,pad_t,pad_b)
    data_padded = Func.pad(data, p2d, 'constant', 0)  #output variable
    assert data_padded.shape == (b, c, (h + 2 * padding), (
        w + 2 * padding)), 'Error: data_padded shape{} wrong!'.format(
            data_padded.shape)

    output = Variable(torch.zeros(b, kernel_size * kernel_size, h, w),
                      requires_grad=data.requires_grad)
    if data.is_cuda:
        output = output.cuda(data.get_device())

    xs, xe = padding, w + padding
    ys, ye = padding, h + padding
    patch_center = data_padded[:, :, ys:ye, xs:xe]

    i = 0
    for dy in np.arange(-padding, padding + 1):
        for dx in np.arange(-padding, padding + 1):
            hs = ys + dy
            he = ye + dy
            ws = xs + dx
            we = xe + dx

            patch_neighbor = data_padded[:, :, hs:he, ws:we]  #(b,c,h,w)
            correlation_tensor = torch.sum(patch_neighbor * patch_center,
                                           dim=1)
            output[:, i, :, :] = correlation_tensor
            i += 1

    return output
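The two offset loops can be removed entirely by unfolding all k^2 neighborhoods at once with Func.unfold (a sketch equivalent to the function above up to floating-point ordering, reusing the Func alias for torch.nn.functional from these examples):

def global_spatial_representation_unfold(data, kernel_size):
    b, c, h, w = data.shape
    pad = kernel_size // 2
    # (b, c*k*k, h*w): every k x k neighborhood, zero-padded at the border
    patches = Func.unfold(data, kernel_size, padding=pad)
    patches = patches.view(b, c, kernel_size * kernel_size, h, w)
    # dot product of each pixel's feature vector with each of its k*k neighbors
    return (patches * data.unsqueeze(2)).sum(dim=1)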
Example #24
 def __init__(self, padding, horizontal="constant", vertical="constant"):
     super().__init__()
     self.padding = _quadruple(padding)
     self.horizontal = horizontal
     self.vertical = vertical
Example #25
 def __init__(self, kernel_size=2, stride=2, padding=0, same=False):
     super(StochasticPool2d, self).__init__()
     self.kernel_size = _pair(kernel_size)  # convert a scalar kernel size to a (kh, kw) pair
     self.stride = _pair(stride)
     self.padding = _quadruple(padding)  # convert to l, r, t, b
     self.same = same
Example #26
 def __init__(self, padding):
     super(ReflectionPad3d, self).__init__()
     self.padding = _quadruple(padding)
 def __init__(self, kernel_size=2, stride=2, padding=0, same=False):
     super(QuaternionMaxAmpPool2d, self).__init__()
     self.k = _pair(kernel_size)
     self.stride = _pair(stride)
     self.padding = _quadruple(padding)  # convert to l, r, t, b
     self.same = same