Example #1
 def __init__(self,
              in_channels,
              scale_channel,
              kernel_size=3,
              stride=1,
              activation='relu',
              mode="spade"):
     super(ScaleableResidualBlock, self).__init__()
     self.activation = getattr(F, activation)
     pad_size = int((kernel_size - 1) / 2)
     self.pad = nn.ReflectionPad2d(pad_size)
     self.conv1 = nn.Conv2d(in_channels,
                            in_channels,
                            kernel_size,
                            stride=stride)
     self.conv2 = nn.Conv2d(in_channels,
                            in_channels,
                            kernel_size,
                            stride=stride)
     self.norm1 = GDN(in_channels, inverse=True)
     self.norm2 = GDN(in_channels, inverse=True)
     self.adaptivelayer1 = AdaptiveDeNormalization(in_chan=scale_channel,
                                                   num_filters=in_channels,
                                                   mode=mode,
                                                   activation=activation)
     self.adaptivelayer2 = AdaptiveDeNormalization(in_chan=scale_channel,
                                                   num_filters=in_channels,
                                                   mode=mode,
                                                   activation=activation)
Example #2
    def __init__(self, M, N, scale=3, out=3, share_rgb=True):
        super().__init__()
        assert (M % scale == 0), "M={} must be divisible by scale={}".format(
            M, scale)
        self.head = nn.Sequential(conv(M // scale, N, stride=1, kernel_size=3),
                                  GDN(N, inverse=True), deconv(N, N),
                                  GDN(N, inverse=True))

        self.blocks = nn.ModuleList([])

        to_rgb = nn.Sequential(deconv(N, N), GDN(N, inverse=True),
                               deconv(N, N), GDN(N, inverse=True),
                               deconv(N, 3))

        for m in range(scale):
            resblock_m = ScaleableResidualBlock(in_channels=N,
                                                scale_channel=M // scale,
                                                activation='relu',
                                                mode="spade")
            self.add_module(f'resblock_{m}', resblock_m)
            if not share_rgb:
                self.add_module(f'to_rgb_{m}', copy.deepcopy(to_rgb))
            else:
                self.add_module(f'to_rgb_{m}', copy.copy(to_rgb))

        self.scale_size = M // scale
        self.scale = scale
Example #3
 def __init__(self, N, M, **kwargs):
     super().__init__()
     self.g_s_conv1 = deconv(M, N)
     self.g_s_gdn1 = GDN(N, inverse=True)
     self.g_s_conv2 = deconv(N, N)
     self.g_s_gdn2 = GDN(N, inverse=True)
     self.g_s_conv3 = deconv(N, N)
     self.g_s_gdn3 = GDN(N, inverse=True)
     self.g_s_conv4 = deconv(N, 3)
Example #4
 def __init__(self, N, M, **kwargs):
     super().__init__()
     self.g_a_conv1 = conv(3, N)
     self.g_a_gdn1 = GDN(N)
     self.g_a_conv2 = conv(N, N)
     self.g_a_gdn2 = GDN(N)
     self.g_a_conv3 = conv(N, N)
     self.g_a_gdn3 = GDN(N)
     self.g_a_conv4 = conv(N, M)
Example #5
    def __init__(self, N, M, **kwargs):
        super().__init__()
        self.pre_conv = conv(6, 3, stride=1)  # no scaling!
        self.pre_gdn = GDN(3)

        self.g_a_conv1 = conv(3, N)
        self.g_a_gdn1 = GDN(N)
        self.g_a_conv2 = conv(N, N)
        self.g_a_gdn2 = GDN(N)
        self.g_a_conv3 = conv(N, N)
        self.g_a_gdn3 = GDN(N)
        self.g_a_conv4 = conv(N, M)
Example #6
    def __init__(self, N, M, **kwargs):
        super().__init__()
        self.g_s_conv1 = deconv(M, N)
        self.g_s_gdn1 = GDN(N, inverse=True)
        self.g_s_conv2 = deconv(N, N)
        self.g_s_gdn2 = GDN(N, inverse=True)
        self.g_s_conv3 = deconv(N, N)
        self.g_s_gdn3 = GDN(N, inverse=True)
        self.g_s_conv4 = deconv(N, 3)

        #
        self.after_gdn = GDN(3, inverse=True)
        self.after_conv = deconv(6, 3, stride=1)  # no scaling!
Example #7
    def __init__(self, N=128):
        super().__init__(entropy_bottleneck_channels=N)

        self.encode = nn.Sequential(
            conv(3, N, kernel_size=9, stride=4),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, N),
        )

        self.decode = nn.Sequential(
            deconv(N, N),
            GDN(N, inverse=True),
            deconv(N, N),
            GDN(N, inverse=True),
            deconv(N, 3, kernel_size=9, stride=4),
        )
Example #8
    def test_gdn(self):
        g = GDN(128)
        x = torch.rand(1, 128, 1, 1)
        y0 = g(x)

        m = torch.jit.script(g)
        y1 = m(x)

        assert torch.allclose(y0, y1)
Example #9
    def __init__(self, N, M, scale, **kwargs):
        super().__init__(entropy_bottleneck_channels=M, **kwargs)

        self.g_a = nn.Sequential(
            conv(3, N),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, M),
        )

        self.g_s = MultiScale_Decoder(M, N, scale=scale, out=3)

        self.N = N
        self.M = M
        self.scale = scale
        self.scale_size = M // scale
Example #10
 def __init__(self, out_channel_N=128):
     super(Analysis, self).__init__()
     # d = (d - kernel_size + 2 * padding) / stride + 1
     # batch*128*64*64
     self.conv1 = nn.Conv2d(3, out_channel_N, 9, stride=4, padding=4)
     self.gdn1 = GDN(out_channel_N)
     # batch*128*32*32
     self.conv2 = nn.Conv2d(out_channel_N,
                            out_channel_N,
                            5,
                            stride=2,
                            padding=2)
     self.gdn2 = GDN(out_channel_N)
     # batch*128*16*16
     self.conv3 = nn.Conv2d(out_channel_N,
                            out_channel_N,
                            5,
                            stride=2,
                            padding=2,
                            bias=False)
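
The shape comments in this example imply a 256x256 input. Below is a quick stand-alone check of that arithmetic, as a sketch with plain nn.Conv2d layers using the same hyperparameters (the GDN layers are omitted since they preserve spatial shape, and the 256x256 input size is an assumption, not stated in the excerpt):

import torch
import torch.nn as nn

# Sketch: verify the batch*128*64*64 / 32*32 / 16*16 comments above,
# assuming a 256x256 input image.
conv1 = nn.Conv2d(3, 128, 9, stride=4, padding=4)
conv2 = nn.Conv2d(128, 128, 5, stride=2, padding=2)
conv3 = nn.Conv2d(128, 128, 5, stride=2, padding=2, bias=False)

x = torch.rand(2, 3, 256, 256)
h1 = conv1(x)   # (256 - 9 + 2*4) // 4 + 1 = 64  ->  2 x 128 x 64 x 64
h2 = conv2(h1)  # (64 - 5 + 2*2) // 2 + 1 = 32   ->  2 x 128 x 32 x 32
h3 = conv3(h2)  # (32 - 5 + 2*2) // 2 + 1 = 16   ->  2 x 128 x 16 x 16
print(h1.shape, h2.shape, h3.shape)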
Example #11
    def test_igdn(self):
        g = GDN(32, inverse=True)
        x = torch.rand(1, 32, 16, 16, requires_grad=True)
        y = g(x)
        y.backward(x)

        assert y.shape == x.shape
        assert x.grad is not None
        assert x.grad.shape == x.shape

        y_ref = x * torch.sqrt(1 + 0.1 * (x**2))
        assert torch.allclose(y_ref, y)
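
With compressai's default initialization (beta = 1, gamma = 0.1 * I), the inverse GDN multiplies by sqrt(1 + 0.1 * x**2), which is where the y_ref line comes from. For comparison, here is a minimal sketch of the analogous check for the forward (non-inverse) GDN, which divides by the same factor. It assumes the GDN in these examples is compressai.layers.GDN with default parameters; it mirrors the test above but is an added illustration, not part of the original suite:

import torch
from compressai.layers import GDN

# Sketch (assumes compressai's default GDN init: beta = 1, gamma = 0.1 * I):
# forward GDN divides by sqrt(beta + gamma * x**2) instead of multiplying.
g = GDN(32)
x = torch.rand(1, 32, 16, 16)
y = g(x)
y_ref = x / torch.sqrt(1 + 0.1 * (x ** 2))
assert torch.allclose(y_ref, y)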
Example #12
    def __init__(self, N, M, **kwargs):
        super().__init__(entropy_bottleneck_channels=M, **kwargs)

        self.g_a = nn.Sequential(
            conv(3, N),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, M),
        )

        self.g_s = nn.Sequential(
            deconv(M, N),
            GDN(N, inverse=True),
            deconv(N, N),
            GDN(N, inverse=True),
            deconv(N, N),
            GDN(N, inverse=True),
            deconv(N, 3),
        )

        self.N = N
        self.M = M
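
This pair of g_a / g_s stacks is the factorized-prior layout. The following is a rough, self-contained sketch of how such modules are typically driven in a forward pass, assuming compressai's CompressionModel / EntropyBottleneck API and substituting plain Conv2d / ConvTranspose2d for the conv / deconv helpers that the excerpt does not show:

import torch
import torch.nn as nn
from compressai.layers import GDN
from compressai.models import CompressionModel

# Toy sketch only (not the original class): a down/up factor of 4 instead of 16,
# to keep it small. The entropy_bottleneck attribute comes from CompressionModel.
class ToyFactorizedPrior(CompressionModel):
    def __init__(self, N=64, M=96):
        super().__init__(entropy_bottleneck_channels=M)
        self.g_a = nn.Sequential(
            nn.Conv2d(3, N, 5, stride=2, padding=2), GDN(N),
            nn.Conv2d(N, M, 5, stride=2, padding=2),
        )
        self.g_s = nn.Sequential(
            nn.ConvTranspose2d(M, N, 5, stride=2, padding=2, output_padding=1),
            GDN(N, inverse=True),
            nn.ConvTranspose2d(N, 3, 5, stride=2, padding=2, output_padding=1),
        )

    def forward(self, x):
        y = self.g_a(x)
        y_hat, y_likelihoods = self.entropy_bottleneck(y)
        x_hat = self.g_s(y_hat)
        return {"x_hat": x_hat, "likelihoods": {"y": y_likelihoods}}

model = ToyFactorizedPrior()
out = model(torch.rand(1, 3, 64, 64))
print(out["x_hat"].shape)  # torch.Size([1, 3, 64, 64])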
Example #13
 def __init__(self, out_channel_N=128):
     super(Synthesis, self).__init__()
     self.deconv1 = nn.ConvTranspose2d(out_channel_N,
                                       out_channel_N,
                                       5,
                                       stride=2,
                                       padding=2,
                                       output_padding=1)
     self.igdn1 = GDN(out_channel_N, inverse=True)
     self.deconv2 = nn.ConvTranspose2d(out_channel_N,
                                       out_channel_N,
                                       5,
                                       stride=2,
                                       padding=2,
                                       output_padding=1)
     self.igdn2 = GDN(out_channel_N, inverse=True)
     self.deconv3 = nn.ConvTranspose2d(out_channel_N,
                                       3,
                                       9,
                                       stride=4,
                                       padding=4,
                                       output_padding=3)
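
As with the Analysis example above, here is a quick sketch checking that these transposed convolutions invert the encoder strides, using ConvTranspose2d's output size formula (in - 1) * stride - 2 * padding + kernel_size + output_padding and assuming a 16x16 latent from a 256x256 input (the inverse-GDN layers preserve spatial shape and are omitted):

import torch
import torch.nn as nn

# Sketch: 16x16 latent -> 32 -> 64 -> 256, mirroring the Analysis path.
deconv1 = nn.ConvTranspose2d(128, 128, 5, stride=2, padding=2, output_padding=1)
deconv2 = nn.ConvTranspose2d(128, 128, 5, stride=2, padding=2, output_padding=1)
deconv3 = nn.ConvTranspose2d(128, 3, 9, stride=4, padding=4, output_padding=3)

y = torch.rand(2, 128, 16, 16)
h1 = deconv1(y)      # (16 - 1)*2 - 2*2 + 5 + 1 = 32   ->  2 x 128 x 32 x 32
h2 = deconv2(h1)     # (32 - 1)*2 - 2*2 + 5 + 1 = 64   ->  2 x 128 x 64 x 64
x_hat = deconv3(h2)  # (64 - 1)*4 - 2*4 + 9 + 3 = 256  ->  2 x 3 x 256 x 256
print(x_hat.shape)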
Example #14
    def __init__(self, N=192, M=192, **kwargs):
        super().__init__(entropy_bottleneck_channels=N, **kwargs)

        self.g_a = nn.Sequential(
            conv(3, N, kernel_size=5, stride=2),
            GDN(N),
            conv(N, N, kernel_size=5, stride=2),
            GDN(N),
            conv(N, N, kernel_size=5, stride=2),
            GDN(N),
            conv(N, M, kernel_size=5, stride=2),
        )

        self.g_s = nn.Sequential(
            deconv(M, N, kernel_size=5, stride=2),
            GDN(N, inverse=True),
            deconv(N, N, kernel_size=5, stride=2),
            GDN(N, inverse=True),
            deconv(N, N, kernel_size=5, stride=2),
            GDN(N, inverse=True),
            deconv(N, 3, kernel_size=5, stride=2),
        )

        self.h_a = nn.Sequential(
            conv(M, N, stride=1, kernel_size=3),
            nn.LeakyReLU(inplace=True),
            conv(N, N, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            conv(N, N, stride=2, kernel_size=5),
        )

        self.h_s = nn.Sequential(
            deconv(N, M, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            deconv(M, M * 3 // 2, stride=2, kernel_size=5),
            nn.LeakyReLU(inplace=True),
            conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),
        )

        self.entropy_parameters = nn.Sequential(
            nn.Conv2d(M * 12 // 3, M * 10 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(M * 10 // 3, M * 8 // 3, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(M * 8 // 3, M * 6 // 3, 1),
        )

        self.context_prediction = MaskedConv2d(M,
                                               2 * M,
                                               kernel_size=5,
                                               padding=2,
                                               stride=1)

        self.gaussian_conditional = GaussianConditional(None)
        self.N = int(N)
        self.M = int(M)
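
The M * 12 // 3 input width of entropy_parameters is simply 4 * M: in this joint autoregressive design the hyper-decoder output (2 * M channels from h_s) is concatenated with the 2 * M channels from context_prediction before the 1x1 convolutions, and the stack tapers down to 2 * M outputs (per-element scale and mean). The concatenation itself happens in the forward pass, which this excerpt does not show; a small arithmetic check of the channel bookkeeping:

# Channel bookkeeping for the entropy_parameters stack above (M = 192 here):
M = 192
assert M * 12 // 3 == 2 * M + 2 * M == 768   # h_s output + context_prediction output
assert M * 10 // 3 == 640 and M * 8 // 3 == 512
assert M * 6 // 3 == 2 * M == 384            # scales and means for the conditional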
Example #15
    def __init__(self, N, M, **kwargs):
        super().__init__(entropy_bottleneck_channels=N, **kwargs)

        self.g_a = nn.Sequential(
            conv(3, N),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, N),
            GDN(N),
            conv(N, M),
        )

        self.g_s = nn.Sequential(
            deconv(M, N),
            GDN(N, inverse=True),
            deconv(N, N),
            GDN(N, inverse=True),
            deconv(N, N),
            GDN(N, inverse=True),
            deconv(N, 3),
        )

        self.h_a = nn.Sequential(
            conv(M, N, stride=1, kernel_size=3),
            nn.ReLU(inplace=True),
            conv(N, N),
            nn.ReLU(inplace=True),
            conv(N, N),
        )

        self.h_s = nn.Sequential(
            deconv(N, N),
            nn.ReLU(inplace=True),
            deconv(N, N),
            nn.ReLU(inplace=True),
            conv(N, M, stride=1, kernel_size=3),
            nn.ReLU(inplace=True),
        )

        self.gaussian_conditional = GaussianConditional(None)
        self.N = int(N)
        self.M = int(M)
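
This __init__ matches the module layout of compressai's ScaleHyperprior. For orientation, here is a sketch of how that model chains these pieces in its forward pass; the attribute names are taken from the code above, but the method itself is not part of this excerpt:

import torch

def forward(self, x):
    # Sketch only: assumes the g_a, h_a, h_s, g_s, entropy_bottleneck and
    # gaussian_conditional attributes defined in the __init__ above.
    y = self.g_a(x)
    z = self.h_a(torch.abs(y))
    z_hat, z_likelihoods = self.entropy_bottleneck(z)
    scales_hat = self.h_s(z_hat)
    y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat)
    x_hat = self.g_s(y_hat)
    return {"x_hat": x_hat, "likelihoods": {"y": y_likelihoods, "z": z_likelihoods}}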
Example #16
    def __init__(self,
                 N=128,
                 M=192,
                 F=21,
                 C=32,
                 K=5,
                 **kwargs):  #'cuda:0' or 'cpu'
        super().__init__(entropy_bottleneck_channels=N, **kwargs)
        # super(DSIC, self).__init__()
        # self.entropy_bottleneck1 = CompressionModel(entropy_bottleneck_channels=N)
        # self.entropy_bottleneck2 = CompressionModel(entropy_bottleneck_channels=N)
        self.gaussian1 = GaussianMixtureConditional(K=K)
        self.gaussian2 = GaussianMixtureConditional(K=K)
        self.N = int(N)
        self.M = int(M)
        self.F = F
        self.C = C
        self.K = K
        # define the components
        self.encoder1 = Encoder1(N, M)
        # self.encoder2 = Encoder2(N,M)
        self.decoder1 = Decoder1(N, M)
        # self.decoder2 = Decoder2(N,M)
        # components needed for pic2
        self.pic2_g_a_conv1 = conv(3, N)
        self.pic2_g_a_gdn1 = GDN(N)
        self.pic2_g_a_conv2 = conv(2 * N, N)
        self.pic2_g_a_gdn2 = GDN(N)
        self.pic2_g_a_conv3 = conv(2 * N, N)
        self.pic2_g_a_gdn3 = GDN(N)
        self.pic2_g_a_conv4 = conv(2 * N, M)
        #
        self.pic2_g_s_conv1 = deconv(M, N)
        self.pic2_g_s_gdn1 = GDN(N, inverse=True)
        self.pic2_g_s_conv2 = deconv(2 * N, N)
        self.pic2_g_s_gdn2 = GDN(N, inverse=True)
        self.pic2_g_s_conv3 = deconv(2 * N, N)
        self.pic2_g_s_gdn3 = GDN(N, inverse=True)
        self.pic2_g_s_conv4 = deconv(2 * N, 3)
        #end of pic2
        #######
        self._global_context = global_context(M, F, C)

        # scale_factor: super-resolution factor (from H, W / 16)
        self._cost_volume1 = cost_volume(N, 8, F, C)  # outermost level
        self._cost_volume2 = cost_volume(N, 4, F, C)
        self._cost_volume3 = cost_volume(N, 2, F, C)  # innermost level
        self._cost_volume4 = cost_volume(N, 2, F, C)  # innermost level
        self._cost_volume5 = cost_volume(N, 4, F, C)
        self._cost_volume6 = cost_volume(N, 8, F, C)  # outermost level

        self._warp1 = dense_warp()
        self._warp2 = dense_warp()
        self._warp3 = dense_warp()
        self._warp4 = dense_warp()
        self._warp5 = dense_warp()
        self._warp6 = dense_warp()

        #hyper
        self._h_a1 = encode_hyper(N=N, M=M)
        self._h_a2 = encode_hyper(N=N, M=M)
        self._h_s1 = gmm_hyper_y1(N=N, M=M, K=K)
        self._h_s2 = gmm_hyper_y2(N=N, M=M, K=K)