Example #1
def conv1x1(in_channels, out_channels, stride=1, bias=True, groups=1):
    return nnn.PartialConv2d(in_channels,
                             out_channels,
                             kernel_size=1,
                             stride=stride,
                             bias=bias,
                             groups=groups)
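None of these excerpts show their imports; `PartialConv2d` is reached through the alias `nnn`, which is not defined here. A sketch of the imports they presumably rely on (the module path for the partial-convolution layer is hypothetical):

import torch
import torch.nn as nn

# Hypothetical path: PartialConv2d is NVIDIA's partial-convolution layer, usually
# vendored as a standalone partialconv2d.py; point this at wherever that file lives.
from model import partialconv2d as nnn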
Example #2
    def __init__(self,
                 in_feat,
                 out_feat,
                 kernel_size,
                 reduction,
                 bias=True,
                 norm=False,
                 act=nn.ReLU(True),
                 downscale=False,
                 return_ca=False):
        super(RCAB, self).__init__()

        self.body = nn.Sequential(
            ConvNorm(in_feat,
                     out_feat,
                     kernel_size,
                     stride=2 if downscale else 1,
                     norm=norm), act,
            ConvNorm(out_feat, out_feat, kernel_size, stride=1, norm=norm),
            CALayer(out_feat, reduction))
        self.downscale = downscale
        if downscale:
            self.downConv = nnn.PartialConv2d(in_feat,
                                              out_feat,
                                              kernel_size=3,
                                              stride=2,
                                              padding=1)
        self.return_ca = return_ca
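Only `__init__` is shown; the forward pass would typically add the (optionally down-convolved) input back onto the output of `self.body`, i.e. a residual connection around the conv-attention stack. A hypothetical instantiation with illustrative values:

block = RCAB(in_feat=64, out_feat=64, kernel_size=3, reduction=16,
             bias=True, norm=False, act=nn.ReLU(True),
             downscale=False, return_ca=False)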
Example #3
    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()
        # global average pooling: feature --> point
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # feature channel downscale and upscale --> channel weight
        self.conv_du = nn.Sequential(
            nnn.PartialConv2d(channel,
                              channel // reduction,
                              1,
                              padding=0,
                              bias=True), nn.ReLU(inplace=True),
            nnn.PartialConv2d(channel // reduction,
                              channel,
                              1,
                              padding=0,
                              bias=True), nn.Sigmoid())
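For context, a minimal usage sketch of the attention path built above; shapes are illustrative, and it assumes the `PartialConv2d` implementation accepts a plain tensor (no mask), as NVIDIA's reference layer does:

ca = CALayer(channel=64, reduction=16)
feat = torch.randn(1, 64, 32, 32)
weights = ca.conv_du(ca.avg_pool(feat))  # shape (1, 64, 1, 1), each entry in (0, 1)
out = feat * weights                     # per-channel rescaling of the feature map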
Example #4
    def __init__(
            self,
            in_feat,
            out_feat,
            kernel_size=3,
            reduction=False,  # 'reduction' is just a placeholder
            bias=True,
            norm=False,
            act=nn.ReLU(True),
            downscale=False):
        super(ResBlock, self).__init__()

        self.body = nn.Sequential(
            ConvNorm(in_feat,
                     out_feat,
                     kernel_size=kernel_size,
                     stride=2 if downscale else 1), act,
            ConvNorm(out_feat, out_feat, kernel_size=kernel_size, stride=1))

        self.downscale = None
        if downscale:
            self.downscale = nnn.PartialConv2d(in_feat,
                                               out_feat,
                                               kernel_size=1,
                                               stride=2)
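A minimal sketch of the residual forward pass such a block typically implements (assumed; the excerpt above only shows `__init__`):

    def forward(self, x):
        # shortcut branch: identity, or a strided 1x1 partial conv when downscaling
        res = x if self.downscale is None else self.downscale(x)
        return self.body(x) + res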
Example #5
    def __init__(self, rgbRange, rgbMean, sign, nChannel=3):
        super(meanShift, self).__init__()
        if nChannel == 1:
            l = rgbMean[0] * rgbRange * float(sign)

            self.shifter = nnn.PartialConv2d(1,
                                             1,
                                             kernel_size=1,
                                             stride=1,
                                             padding=0)
            self.shifter.weight.data = torch.eye(1).view(1, 1, 1, 1)
            self.shifter.bias.data = torch.Tensor([l])
        elif nChannel == 3:
            r = rgbMean[0] * rgbRange * float(sign)
            g = rgbMean[1] * rgbRange * float(sign)
            b = rgbMean[2] * rgbRange * float(sign)

            self.shifter = nnn.PartialConv2d(3,
                                             3,
                                             kernel_size=1,
                                             stride=1,
                                             padding=0)
            self.shifter.weight.data = torch.eye(3).view(3, 3, 1, 1)
            self.shifter.bias.data = torch.Tensor([r, g, b])
        else:
            r = rgbMean[0] * rgbRange * float(sign)
            g = rgbMean[1] * rgbRange * float(sign)
            b = rgbMean[2] * rgbRange * float(sign)
            self.shifter = nnn.PartialConv2d(6,
                                             6,
                                             kernel_size=1,
                                             stride=1,
                                             padding=0)
            self.shifter.weight.data = torch.eye(6).view(6, 6, 1, 1)
            self.shifter.bias.data = torch.Tensor([r, g, b, r, g, b])

        # Freeze the meanShift layer
        for params in self.shifter.parameters():
            params.requires_grad = False
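`meanShift` is a frozen 1x1 identity convolution whose bias adds (sign=+1) or subtracts (sign=-1) the per-channel means scaled by `rgbRange`. A hypothetical usage with illustrative mean values:

rgb_mean = (0.4488, 0.4371, 0.4040)  # illustrative channel means, not from the original
sub_mean = meanShift(rgbRange=1.0, rgbMean=rgb_mean, sign=-1)
add_mean = meanShift(rgbRange=1.0, rgbMean=rgb_mean, sign=1)
x = torch.rand(1, 3, 64, 64)
x_norm = sub_mean.shifter(x)       # forward() is not shown above, so call the frozen conv directly
x_back = add_mean.shifter(x_norm)  # recovers x up to floating-point error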
Example #6
def conv7x7(in_channels,
            out_channels,
            stride=1,
            padding=3,
            bias=True,
            groups=1):
    return nnn.PartialConv2d(in_channels,
                             out_channels,
                             kernel_size=7,
                             stride=stride,
                             padding=padding,
                             bias=bias,
                             groups=groups)
Example #7
def conv(in_channels,
         out_channels,
         kernel_size,
         stride=1,
         bias=True,
         groups=1):
    return nnn.PartialConv2d(in_channels,
                             out_channels,
                             kernel_size=kernel_size,
                             padding=kernel_size // 2,
                             stride=stride,
                             bias=bias,
                             groups=groups)
Example #8
    def __init__(self, in_feat, out_feat, kernel_size, stride=1, norm=False):
        super(ConvNorm, self).__init__()

        reflection_padding = kernel_size // 2
        self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
        self.conv = nnn.PartialConv2d(in_feat,
                                      out_feat,
                                      stride=stride,
                                      kernel_size=kernel_size,
                                      bias=True)

        self.norm = norm
        if norm == 'IN':
            self.norm = nn.InstanceNorm2d(out_feat, track_running_stats=True)
        elif norm == 'BN':
            self.norm = nn.BatchNorm2d(out_feat)
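A minimal sketch of the forward pass such a pad-conv-norm block typically implements (assumed; only `__init__` is shown above):

    def forward(self, x):
        out = self.reflection_pad(x)   # 'same'-style reflection padding (kernel_size // 2 per side)
        out = self.conv(out)
        if self.norm:                  # False, or an InstanceNorm2d / BatchNorm2d module
            out = self.norm(out)
        return out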